file_name (large_string, lengths 4-140) | prefix (large_string, lengths 0-12.1k) | suffix (large_string, lengths 0-12k) | middle (large_string, lengths 0-7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
utils.py
|
from .products.builders import products
from .responses import response, make_identity, make_error
from .static.builders import codevalues
from .users.builders import users
def fill_cache(cache, values_dict):
"""
Fill a mock cache object with some keys and values.
"""
cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)
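# A minimal usage sketch (the Mock object is illustrative; the ``.side_effect``
# assignment above assumes a mock-style cache such as ``mock.Mock()``):
#
#   cache = mock.Mock()
#   fill_cache(cache, {"companies": [1, 2]})
#   cache.get("companies")          # -> [1, 2]
#   cache.get("missing", "default") # -> "default"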
def setup_responses(http, response_dict):
"""
Set up a mock http object with some responses to given
URLs. ``response_dict`` should map full URLs (including query string) to
the (response, content) tuple that will be returned (equivalent to the
return value of the httplib2.Http.request method).
"""
url_dict = dict((Url(k), v) for k, v in response_dict.iteritems())
def request(*args, **kwargs):
uri = Url(kwargs["uri"])
try:
return url_dict[uri]
except KeyError:
return response(
make_error(
"Mock got unexpected request URI: %s \n"
" -- Options are %s --" % (uri, response_dict.keys())
),
500)
http.request.side_effect = request
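# Usage sketch (the mock http object is illustrative; the URL and builder call
# are taken from COMMON_RESPONSES below):
#
#   http = mock.Mock()
#   setup_responses(http, {
#       "http://fake.base/rest/products?_type=json":
#           response(products.searchresult({})),
#   })
#   # http.request(uri=...) now returns the canned (response, content) tuple;
#   # any unexpected URI gets a 500 error response listing the known URLs.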
COMMON_RESPONSES = {
"http://fake.base/rest/companies/1?_type=json":
response(companies.one(
resourceIdentity=make_identity(id=1, url="companies/1"))),
"http://fake.base/rest/users?_type=json":
response(users.searchresult({})),
"http://fake.base/rest/users/current?_type=json":
response(users.one()),
"http://fake.base/rest/products?_type=json":
response(products.searchresult({})),
"http://fake.base/rest/environments?_type=json":
response(environments.searchresult({}, {})),
"http://fake.base/staticData/values/TESTCYCLESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNSTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTCASESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNRESULTSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "PASSED", "id": 2},
{"description": "FAILED", "id": 3},
{"description": "BLOCKED", "id": 4},
{"description": "STARTED", "id": 5},
{"description": "INVALIDATED", "id": 6},
)),
"http://fake.base/staticData/values/APPROVALSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "APPROVED", "id": 2},
{"description": "REJECTED", "id": 3},
)),
"http://fake.base/staticData/values/ATTACHMENTTYPE?_type=json":
response(codevalues.array(
{"description": "BRANDING", "id": 1},
{"description": "DESIGN", "id": 2},
{"description": "USERGUIDE", "id": 3},
{"description": "REQUIREMENTS", "id": 4},
{"description": "KNOWNISSUES", "id": 5},
{"description": "SCREENCAPTURE", "id": 6},
{"description": "NDA", "id": 7},
{"description": "UNSPECIFIED", "id": 8},
)),
}
def setup_common_responses(http, response_dict):
"""
A version of ``setup_responses`` intended for end-to-end request-response
testing. Automatically knows how to respond to the StaticCompanyMiddleware
query for the current company, and to static data requests.
"""
new_dict = COMMON_RESPONSES.copy()
new_dict.update(response_dict)
return setup_responses(http, new_dict)
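# Sketch of the intended difference from ``setup_responses`` (the URL comes from
# COMMON_RESPONSES above; the override shown is illustrative):
#
#   setup_common_responses(http, {
#       "http://fake.base/rest/users/current?_type=json":
#           response(users.one(email="someone@example.com")),
#   })
#   # Every COMMON_RESPONSES URL is answered automatically; the entry above
#   # overrides the default current-user response.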
@contextmanager
def locmem_cache():
cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
cache.clear()
patcher = patch("ccui.core.cache.cache", cache)
patcher.start()
yield cache
patcher.stop()
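# Intended use, as a sketch (the body shown is a placeholder):
#
#   with locmem_cache() as cache:
#       ...  # code under test sees a fresh local-memory cache via ccui.core.cache
#   # the patch is stopped again when the block finishes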
class CachingFunctionalTestMixin(object):
def setUp(self):
self.cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
self.cache.clear()
self.patcher = patch("ccui.core.cache.cache", self.cache)
self.patcher.start()
self.addCleanup(self.patcher.stop)
def creds(email, password=None, cookie=None):
from ccui.users.auth import UserCredentials
from ccui.users.models import User
creds = UserCredentials(email, password=password, cookie=cookie)
creds._user = User(email=email)
creds._user.auth = creds
creds._permission_codes = []
return creds
class AuthTestCase(TestCase):
def creds(self, email, password=None, cookie=None):
return creds(email, password, cookie)
@property
def auth(self):
"""
Since the server responses are mocked, we could just ignore auth when
not testing it specifically, but we include it for all requests to more
closely match real usage.
"""
return self.creds("admin@example.com", cookie="USERTOKEN: authcookie")
class ViewTestCase(AuthTestCase):
factory = RequestFactory()
def setUp(self):
self.rendered = {}
on_template_render = partial(store_rendered_templates, self.rendered)
template_rendered.connect(on_template_render)
self.addCleanup(template_rendered.disconnect, on_template_render)
def setup_responses(self, http, response_dict=None, user=None):
if user is None:
user = self.auth.user
if response_dict is None:
response_dict = {}
else:
response_dict = response_dict.copy()
response_dict.setdefault(
"http://fake.base/rest/users/current?_type=json",
response(
users.one(
email=user.email,
firstName=user.firstName,
lastName=user.lastName,
screenName=user.screenName
)
)
)
setup_common_responses(http, response_dict)
@property
def app(self):
class AuthWSGIHandler(WSGIHandler):
def get_response(self_, request):
request._cached_user = self.auth.user
request._cached_auth = self.auth
return super(AuthWSGIHandler, self_).get_response(request)
return TestApp(AuthWSGIHandler())
class ResourceTestCase(AuthTestCase):
@property
def resource_class(self):
if not hasattr(self, "_resource_class"):
self._resource_class = self.get_resource_class()
return self._resource_class
def get_resource_class(self):
raise NotImplementedError
@property
def resource_list_class(self):
if not hasattr(self, "_resource_list_class"):
|
return self._resource_list_class
def get_resource_list_class(self):
raise NotImplementedError
def assertSameResource(self, res1, res2):
self.assertEqual(res1._location, res2._location)
def assertSameResourceList(self, list1, list2):
self.assertEqual(
[r._location for r in list1],
[r._location for r in list2],
)
class TestResourceTestCase(ResourceTestCase):
builder = ListBuilder(
"testresource",
"testresources",
"Testresource",
{ "name": "Default name" })
def get_resource_class(self):
from ccui.core.api import RemoteObject, fields
def filter_callable(vals):
return ("callableFilter", [v+"foo" for v in vals])
class TestResource(RemoteObject):
name = fields.Field()
submit_as = fields.Field(api_name="submitAs")
non_field_filters = {
"non_field": "nonField",
"callable": filter_callable,
}
cache = False
def __unicode__(self_):
return u"__unicode__ of %s" % self_.name
return TestResource
def get_resource_list_class(self):
from ccui.core.api import ListObject, fields
class TestResourceList(ListObject):
entryclass = self.resource_class
api_name = "testresources"
default_url = "testresources"
entries = fields.List(fields.Object(self.resource_class))
cache = False
return TestResourceList
class BaseResourceTest(object):
"""
Generic smoke
|
self._resource_list_class = self.get_resource_list_class()
|
conditional_block
|
utils.py
|
from .products.builders import products
from .responses import response, make_identity, make_error
from .static.builders import codevalues
from .users.builders import users
def fill_cache(cache, values_dict):
"""
Fill a mock cache object with some keys and values.
"""
cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)
def setup_responses(http, response_dict):
"""
Set up a mock http object with some responses to given
URLs. ``response_dict`` should map full URLs (including query string) to
the (response, content) tuple that will be returned (equivalent to the
return value of the httplib2.Http.request method).
"""
url_dict = dict((Url(k), v) for k, v in response_dict.iteritems())
def request(*args, **kwargs):
uri = Url(kwargs["uri"])
try:
return url_dict[uri]
except KeyError:
return response(
make_error(
"Mock got unexpected request URI: %s \n"
" -- Options are %s --" % (uri, response_dict.keys())
),
500)
http.request.side_effect = request
COMMON_RESPONSES = {
"http://fake.base/rest/companies/1?_type=json":
response(companies.one(
resourceIdentity=make_identity(id=1, url="companies/1"))),
"http://fake.base/rest/users?_type=json":
response(users.searchresult({})),
"http://fake.base/rest/users/current?_type=json":
response(users.one()),
"http://fake.base/rest/products?_type=json":
response(products.searchresult({})),
"http://fake.base/rest/environments?_type=json":
response(environments.searchresult({}, {})),
"http://fake.base/staticData/values/TESTCYCLESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNSTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTCASESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNRESULTSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "PASSED", "id": 2},
{"description": "FAILED", "id": 3},
{"description": "BLOCKED", "id": 4},
{"description": "STARTED", "id": 5},
{"description": "INVALIDATED", "id": 6},
)),
"http://fake.base/staticData/values/APPROVALSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "APPROVED", "id": 2},
{"description": "REJECTED", "id": 3},
)),
"http://fake.base/staticData/values/ATTACHMENTTYPE?_type=json":
response(codevalues.array(
{"description": "BRANDING", "id": 1},
{"description": "DESIGN", "id": 2},
{"description": "USERGUIDE", "id": 3},
{"description": "REQUIREMENTS", "id": 4},
{"description": "KNOWNISSUES", "id": 5},
{"description": "SCREENCAPTURE", "id": 6},
{"description": "NDA", "id": 7},
{"description": "UNSPECIFIED", "id": 8},
)),
}
def setup_common_responses(http, response_dict):
"""
A version of ``setup_responses`` intended for end-to-end request-response
testing. Automatically knows how to respond to the StaticCompanyMiddleware
query for the current company, and to static data requests.
"""
new_dict = COMMON_RESPONSES.copy()
new_dict.update(response_dict)
return setup_responses(http, new_dict)
@contextmanager
def locmem_cache():
cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
cache.clear()
patcher = patch("ccui.core.cache.cache", cache)
patcher.start()
yield cache
patcher.stop()
class CachingFunctionalTestMixin(object):
def setUp(self):
self.cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
self.cache.clear()
self.patcher = patch("ccui.core.cache.cache", self.cache)
self.patcher.start()
self.addCleanup(self.patcher.stop)
def creds(email, password=None, cookie=None):
from ccui.users.auth import UserCredentials
from ccui.users.models import User
creds = UserCredentials(email, password=password, cookie=cookie)
creds._user = User(email=email)
creds._user.auth = creds
creds._permission_codes = []
return creds
class AuthTestCase(TestCase):
def creds(self, email, password=None, cookie=None):
return creds(email, password, cookie)
@property
def auth(self):
"""
Since the server responses are mocked, we could just ignore auth when
not testing it specifically, but we include it for all requests to more
closely match real usage.
"""
return self.creds("admin@example.com", cookie="USERTOKEN: authcookie")
class ViewTestCase(AuthTestCase):
factory = RequestFactory()
def setUp(self):
self.rendered = {}
on_template_render = partial(store_rendered_templates, self.rendered)
template_rendered.connect(on_template_render)
self.addCleanup(template_rendered.disconnect, on_template_render)
def setup_responses(self, http, response_dict=None, user=None):
if user is None:
user = self.auth.user
|
response_dict.setdefault(
"http://fake.base/rest/users/current?_type=json",
response(
users.one(
email=user.email,
firstName=user.firstName,
lastName=user.lastName,
screenName=user.screenName
)
)
)
setup_common_responses(http, response_dict)
@property
def app(self):
class AuthWSGIHandler(WSGIHandler):
def get_response(self_, request):
request._cached_user = self.auth.user
request._cached_auth = self.auth
return super(AuthWSGIHandler, self_).get_response(request)
return TestApp(AuthWSGIHandler())
class ResourceTestCase(AuthTestCase):
@property
def resource_class(self):
if not hasattr(self, "_resource_class"):
self._resource_class = self.get_resource_class()
return self._resource_class
def get_resource_class(self):
raise NotImplementedError
@property
def resource_list_class(self):
if not hasattr(self, "_resource_list_class"):
self._resource_list_class = self.get_resource_list_class()
return self._resource_list_class
def get_resource_list_class(self):
raise NotImplementedError
def assertSameResource(self, res1, res2):
self.assertEqual(res1._location, res2._location)
def assertSameResourceList(self, list1, list2):
self.assertEqual(
[r._location for r in list1],
[r._location for r in list2],
)
class TestResourceTestCase(ResourceTestCase):
builder = ListBuilder(
"testresource",
"testresources",
"Testresource",
{ "name": "Default name" })
def get_resource_class(self):
from ccui.core.api import RemoteObject, fields
def filter_callable(vals):
return ("callableFilter", [v+"foo" for v in vals])
class TestResource(RemoteObject):
name = fields.Field()
submit_as = fields.Field(api_name="submitAs")
non_field_filters = {
"non_field": "nonField",
"callable": filter_callable,
}
cache = False
def __unicode__(self_):
return u"__unicode__ of %s" % self_.name
return TestResource
def get_resource_list_class(self):
from ccui.core.api import ListObject, fields
class TestResourceList(ListObject):
entryclass = self.resource_class
api_name = "testresources"
default_url = "testresources"
entries = fields.List(fields.Object(self.resource_class))
cache = False
return TestResourceList
class BaseResourceTest(object):
"""
Generic smoke tests
|
if response_dict is None:
response_dict = {}
else:
response_dict = response_dict.copy()
|
random_line_split
|
utils.py
|
make_identity, make_error
from .static.builders import codevalues
from .users.builders import users
def fill_cache(cache, values_dict):
"""
Fill a mock cache object with some keys and values.
"""
cache.get.side_effect = lambda k, d=None: values_dict.get(k, d)
def setup_responses(http, response_dict):
"""
Set up a mock http object with some responses to given
URLs. ``response_dict`` should map full URLs (including query string) to
the (response, content) tuple that will be returned (equivalent to the
return value of the httplib2.Http.request method).
"""
url_dict = dict((Url(k), v) for k, v in response_dict.iteritems())
def request(*args, **kwargs):
uri = Url(kwargs["uri"])
try:
return url_dict[uri]
except KeyError:
return response(
make_error(
"Mock got unexpected request URI: %s \n"
" -- Options are %s --" % (uri, response_dict.keys())
),
500)
http.request.side_effect = request
COMMON_RESPONSES = {
"http://fake.base/rest/companies/1?_type=json":
response(companies.one(
resourceIdentity=make_identity(id=1, url="companies/1"))),
"http://fake.base/rest/users?_type=json":
response(users.searchresult({})),
"http://fake.base/rest/users/current?_type=json":
response(users.one()),
"http://fake.base/rest/products?_type=json":
response(products.searchresult({})),
"http://fake.base/rest/environments?_type=json":
response(environments.searchresult({}, {})),
"http://fake.base/staticData/values/TESTCYCLESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNSTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTCASESTATUS?_type=json":
response(codevalues.array(
{"description": "DRAFT", "id": 1},
{"description": "ACTIVE", "id": 2},
{"description": "LOCKED", "id": 3},
{"description": "CLOSED", "id": 4},
{"description": "DISCARDED", "id": 5},
)),
"http://fake.base/staticData/values/TESTRUNRESULTSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "PASSED", "id": 2},
{"description": "FAILED", "id": 3},
{"description": "BLOCKED", "id": 4},
{"description": "STARTED", "id": 5},
{"description": "INVALIDATED", "id": 6},
)),
"http://fake.base/staticData/values/APPROVALSTATUS?_type=json":
response(codevalues.array(
{"description": "PENDING", "id": 1},
{"description": "APPROVED", "id": 2},
{"description": "REJECTED", "id": 3},
)),
"http://fake.base/staticData/values/ATTACHMENTTYPE?_type=json":
response(codevalues.array(
{"description": "BRANDING", "id": 1},
{"description": "DESIGN", "id": 2},
{"description": "USERGUIDE", "id": 3},
{"description": "REQUIREMENTS", "id": 4},
{"description": "KNOWNISSUES", "id": 5},
{"description": "SCREENCAPTURE", "id": 6},
{"description": "NDA", "id": 7},
{"description": "UNSPECIFIED", "id": 8},
)),
}
def setup_common_responses(http, response_dict):
"""
A version of ``setup_responses`` intended for end-to-end request-response
testing. Automatically knows how to respond to the StaticCompanyMiddleware
query for the current company, and to static data requests.
"""
new_dict = COMMON_RESPONSES.copy()
new_dict.update(response_dict)
return setup_responses(http, new_dict)
@contextmanager
def locmem_cache():
cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
cache.clear()
patcher = patch("ccui.core.cache.cache", cache)
patcher.start()
yield cache
patcher.stop()
class CachingFunctionalTestMixin(object):
def setUp(self):
self.cache = get_cache("django.core.cache.backends.locmem.LocMemCache")
self.cache.clear()
self.patcher = patch("ccui.core.cache.cache", self.cache)
self.patcher.start()
self.addCleanup(self.patcher.stop)
def creds(email, password=None, cookie=None):
from ccui.users.auth import UserCredentials
from ccui.users.models import User
creds = UserCredentials(email, password=password, cookie=cookie)
creds._user = User(email=email)
creds._user.auth = creds
creds._permission_codes = []
return creds
class AuthTestCase(TestCase):
def creds(self, email, password=None, cookie=None):
return creds(email, password, cookie)
@property
def auth(self):
"""
Since the server responses are mocked, we could just ignore auth when
not testing it specifically, but we include it for all requests to more
closely match real usage.
"""
return self.creds("admin@example.com", cookie="USERTOKEN: authcookie")
class ViewTestCase(AuthTestCase):
factory = RequestFactory()
def setUp(self):
self.rendered = {}
on_template_render = partial(store_rendered_templates, self.rendered)
template_rendered.connect(on_template_render)
self.addCleanup(template_rendered.disconnect, on_template_render)
def setup_responses(self, http, response_dict=None, user=None):
if user is None:
user = self.auth.user
if response_dict is None:
response_dict = {}
else:
response_dict = response_dict.copy()
response_dict.setdefault(
"http://fake.base/rest/users/current?_type=json",
response(
users.one(
email=user.email,
firstName=user.firstName,
lastName=user.lastName,
screenName=user.screenName
)
)
)
setup_common_responses(http, response_dict)
@property
def app(self):
class AuthWSGIHandler(WSGIHandler):
def get_response(self_, request):
request._cached_user = self.auth.user
request._cached_auth = self.auth
return super(AuthWSGIHandler, self_).get_response(request)
return TestApp(AuthWSGIHandler())
class ResourceTestCase(AuthTestCase):
@property
def resource_class(self):
if not hasattr(self, "_resource_class"):
self._resource_class = self.get_resource_class()
return self._resource_class
def get_resource_class(self):
raise NotImplementedError
@property
def resource_list_class(self):
if not hasattr(self, "_resource_list_class"):
self._resource_list_class = self.get_resource_list_class()
return self._resource_list_class
def get_resource_list_class(self):
raise NotImplementedError
def assertSameResource(self, res1, res2):
self.assertEqual(res1._location, res2._location)
def assertSameResourceList(self, list1, list2):
self.assertEqual(
[r._location for r in list1],
[r._location for r in list2],
)
class TestResourceTestCase(ResourceTestCase):
builder = ListBuilder(
"testresource",
"testresources",
"Testresource",
{ "name": "Default name" })
def get_resource_class(self):
from ccui.core.api import RemoteObject, fields
def filter_callable(vals):
return ("callableFilter", [v+"foo" for v in vals])
class TestResource(RemoteObject):
name = fields.Field()
submit_as = fields.Field(api_name="submitAs")
non_field_filters = {
"non_field": "nonField",
"callable": filter_callable,
}
cache = False
def __unicode__(self_):
return u"__unicode__ of %s" % self_.name
return TestResource
def get_resource_list_class(self):
from ccui.core.api import ListObject, fields
class TestResourceList(ListObject):
entryclass = self.resource_class
api_name = "testresources"
default_url = "testresources"
entries = fields.List(fields.Object(self.resource_class))
cache = False
return TestResourceList
class BaseResourceTest(object):
|
"""
Generic smoke tests that will be run for all resource types.
"""
pass
|
identifier_body
|
|
jquery-collision.js
|
.proto.css("padding-left")) || 0;
clone.x2 -= parseInt(this.proto.css("padding-right")) || 0;
clone.x2 -= parseInt(this.proto.css("border-right")) || 0;
clone.x2 -= parseInt(this.proto.css("margin-right")) || 0;
clone.y1 += parseInt(this.proto.css("margin-top")) || 0;
clone.y1 += parseInt(this.proto.css("border-top")) || 0;
clone.y1 += parseInt(this.proto.css("padding-top")) || 0;
clone.y2 -= parseInt(this.proto.css("padding-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("border-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("margin-bottom")) || 0;
}
return clone;
}
CollisionCoords.prototype.move = function(dx, dy) {
this.x1 += dx;
this.x2 += dx;
this.y1 += dy;
this.y2 += dy;
return this;
};
CollisionCoords.prototype.update = function(obj) {
if ("x1" in obj) this.x1 = obj["x1"];
if ("x2" in obj) this.x1 = obj["x2"];
if ("y1" in obj) this.x1 = obj["y1"];
if ("y2" in obj) this.x1 = obj["y2"];
if ("left" in obj) {
var w = this.x2 - this.x1;
this.x1 = obj["left"];
this.x2 = this.x1 + w;
}
if ("top" in obj) {
var h = this.y2 - this.y1;
this.y1 = obj["top"];
this.y2 = this.y1 + h;
}
if ("offset" in obj) {
var o = obj.offset();
this.update(o);
this.x2 = this.x1 + obj.width();
this.y2 = this.y1 + obj.height();
}
if ("dir" in obj) this.x1 = obj["dir"];
return this;
};
CollisionCoords.prototype.width = function() { return (this.x2 - this.x1); };
CollisionCoords.prototype.height = function() { return (this.y2 - this.y1); };
CollisionCoords.prototype.centerx = function() { return (this.x1 + this.x2) / 2; };
CollisionCoords.prototype.centery = function() { return (this.y1 + this.y2) / 2; };
CollisionCoords.prototype.toString = function() {
return (this.proto["get"] ? "#" + this.proto.get(0).id : "") + "[" + [this.x1, this.y1, this.x2, this.y2].join(",") + "]";
};
// the big mistake in a lot of collision-detectors,
// make floating-point arithmetic work for you, not against you:
CollisionCoords.EPSILON = 0.001;
CollisionCoords.prototype.containsPoint = function(x, y, inclusive) {
if (!inclusive) inclusive = false;
var epsilon = (inclusive ? -1 : +1) * CollisionCoords.EPSILON;
if ((x > (this.x1 + epsilon) && x < (this.x2 - epsilon)) &&
(y > (this.y1 + epsilon) && y < (this.y2 - epsilon)))
return true;
else
return false;
};
CollisionCoords.prototype.overlaps = function(other, inclusive) {
var hit = this._overlaps(other, inclusive);
if (hit.length > 0) return hit;
hit = other._overlaps(this, inclusive);
if (hit.length > 0) {
hit[0].dir = hit[0].dir == "Inside" ? "Outside" :
hit[0].dir == "Outside" ? "Inside" :
hit[0].dir == "N" ? "S" :
hit[0].dir == "S" ? "N" :
hit[0].dir == "W" ? "E" :
hit[0].dir == "E" ? "W" :
hit[0].dir == "NE" ? "SW" :
hit[0].dir == "SW" ? "NE" :
hit[0].dir == "SE" ? "NW" :
hit[0].dir == "NW" ? "SE" :
undefined;
}
return hit || [];
}
CollisionCoords.prototype._overlaps = function(other, inclusive) {
var c1 = other;
var c2 = this;
if (!inclusive) inclusive = false;
var ax = c1.centerx();
var ay = c1.centery();
// nine points to check whether they're in e2: e1's four corners, e1's center-sides, and e1's center
// if center of e1 is within e2, there's some kind of total inclusion
var points = [[c1.x1, c1.y1, "SE"], [c1.x2, c1.y1, "SW"], [c1.x2, c1.y2, "NW"], [c1.x1, c1.y2, "NE"], [ax, c1.y1, "S"], [c1.x2, ay, "W"], [ax, c1.y2, "N"], [c1.x1, ay, "E"], [ax, ay, undefined]];
var hit = null;
var dirs = { NW: false, N: false, NE: false, E: false, SE: false, S: false, SW: false, W: false };
for (var i = 0; i < points.length; i++) {
if (this.containsPoint(points[i][0], points[i][1], inclusive)) {
if (points[i][2]) dirs[points[i][2]] = true;
if (hit) continue; // don't need to make another one - it'll be the same anyways //
hit = [new CollisionCoords({ x1: Math.max(c1.x1, c2.x1), y1: Math.max(c1.y1, c2.y1),
x2: Math.min(c1.x2, c2.x2), y2: Math.min(c1.y2, c2.y2), dir: points[i][2]
})];
}
}
if (hit) {
if (dirs["NW"] && dirs["NE"]) hit[0].dir = "N";
if (dirs["NE"] && dirs["SE"]) hit[0].dir = "E";
if (dirs["SE"] && dirs["SW"]) hit[0].dir = "S";
if (dirs["SW"] && dirs["NW"]) hit[0].dir = "W";
if (dirs["NW"] && dirs["NE"] &&
dirs["SE"] && dirs["SW"]) hit[0].dir = "Outside";
if (!dirs["NW"] && !dirs["NE"] &&
!dirs["SE"] && !dirs["SW"] &&
!dirs["N"] && !dirs["E"] &&
!dirs["S"] && !dirs["W"]) hit[0].dir = "Inside";
}
return hit || [];
};
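// Rough usage sketch (the coordinates are illustrative):
//   var a = new CollisionCoords({x1: 0, y1: 0, x2: 10, y2: 10});
//   var b = new CollisionCoords({x1: 5, y1: 5, x2: 15, y2: 15});
//   var hits = a.overlaps(b);
//   // hits[0] covers the shared rectangle [5,5,10,10], and hits[0].dir reports
//   // where the overlap sits as a compass direction or "Inside"/"Outside".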
CollisionCoords.prototype._protrusion = function(area, dir, list) {
var o = this.overlaps(new CollisionCoords(area), false);
if (o.length <= 0) return list;
o[0].dir = dir;
list.push(o[0]);
return list;
};
CollisionCoords.prototype.protrusions = function(container) {
var list = [];
var n = Number.NEGATIVE_INFINITY;
var p = Number.POSITIVE_INFINITY;
var l = container.x1;
var r = container.x2;
var t = container.y1;
var b = container.y2;
list = this._protrusion({ x1: l, y1: n, x2: r, y2: t }, "N", list);
list = this._protrusion({ x1: r, y1: n, x2: p, y2: t }, "NE", list);
list = this._protrusion({ x1: r, y1: t, x2: p, y2: b }, "E", list);
list = this._protrusion({ x1: r, y1: b, x2: p, y2: p }, "SE", list);
list = this._protrusion({ x1: l, y1: b, x2: r, y2: p }, "S", list);
list = this._protrusion({ x1: n, y1: b, x2: l, y2: p }, "SW", list);
list = this._protrusion({ x1: n, y1: t, x2: l, y2: b }, "W", list);
list = this._protrusion({ x1: n, y1: n, x2: l, y2: t }, "NW", list);
return list;
};
function Collision(targetNode, obstacleNode, overlapCoords, overlapType)
|
{
this.target = targetNode;
this.obstacle = obstacleNode;
this.overlap = overlapCoords;
this.overlapType = overlapType;
}
|
identifier_body
|
|
jquery-collision.js
|
+
(parseInt(proto.css("padding-right")) || 0) + (parseInt(proto.css("border-right")) || 0) + (parseInt(proto.css("margin-right")) || 0);
this.y2 += this.y1;
this.y2 += (parseInt(proto.css("margin-top")) || 0) + (parseInt(proto.css("border-top")) || 0) + (parseInt(proto.css("padding-top")) || 0) +
(parseInt(proto.css("padding-bottom")) || 0) + (parseInt(proto.css("border-bottom")) || 0) + (parseInt(proto.css("margin-bottom")) || 0);
}
else {
var o = proto.offset();
this.x1 = o.left - (parseInt(proto.css("margin-left")) || 0); // not also border -- offset starts from inside margin but outside border
this.y1 = o.top - (parseInt(proto.css("margin-top")) || 0); // not also border -- offset starts from inside margin but outside border
this.x2 = this.x1 + proto.outerWidth(true);
this.y2 = this.y1 + proto.outerHeight(true);
}
this.proto = proto;
}
else if ("x1" in proto) {
// used to effectively "clone"
this.x1 = proto.x1;
this.y1 = proto.y1;
this.x2 = proto.x2;
this.y2 = proto.y2;
this.proto = proto;
}
if ("dir" in proto) {
this.dir = proto.dir;
}
}
CollisionCoords.prototype.innerContainer = function() {
var clone = new CollisionCoords(this);
if (this.proto["css"])
|
return clone;
}
CollisionCoords.prototype.move = function(dx, dy) {
this.x1 += dx;
this.x2 += dx;
this.y1 += dy;
this.y2 += dy;
return this;
};
CollisionCoords.prototype.update = function(obj) {
if ("x1" in obj) this.x1 = obj["x1"];
if ("x2" in obj) this.x1 = obj["x2"];
if ("y1" in obj) this.x1 = obj["y1"];
if ("y2" in obj) this.x1 = obj["y2"];
if ("left" in obj) {
var w = this.x2 - this.x1;
this.x1 = obj["left"];
this.x2 = this.x1 + w;
}
if ("top" in obj) {
var h = this.y2 - this.y1;
this.y1 = obj["top"];
this.y2 = this.y1 + h;
}
if ("offset" in obj) {
var o = obj.offset();
this.update(o);
this.x2 = this.x1 + obj.width();
this.y2 = this.y1 + obj.height();
}
if ("dir" in obj) this.x1 = obj["dir"];
return this;
};
CollisionCoords.prototype.width = function() { return (this.x2 - this.x1); };
CollisionCoords.prototype.height = function() { return (this.y2 - this.y1); };
CollisionCoords.prototype.centerx = function() { return (this.x1 + this.x2) / 2; };
CollisionCoords.prototype.centery = function() { return (this.y1 + this.y2) / 2; };
CollisionCoords.prototype.toString = function() {
return (this.proto["get"] ? "#" + this.proto.get(0).id : "") + "[" + [this.x1, this.y1, this.x2, this.y2].join(",") + "]";
};
// the big mistake in a lot of collision-detectors,
// make floating-point arithmetic work for you, not against you:
CollisionCoords.EPSILON = 0.001;
CollisionCoords.prototype.containsPoint = function(x, y, inclusive) {
if (!inclusive) inclusive = false;
var epsilon = (inclusive ? -1 : +1) * CollisionCoords.EPSILON;
if ((x > (this.x1 + epsilon) && x < (this.x2 - epsilon)) &&
(y > (this.y1 + epsilon) && y < (this.y2 - epsilon)))
return true;
else
return false;
};
CollisionCoords.prototype.overlaps = function(other, inclusive) {
var hit = this._overlaps(other, inclusive);
if (hit.length > 0) return hit;
hit = other._overlaps(this, inclusive);
if (hit.length > 0) {
hit[0].dir = hit[0].dir == "Inside" ? "Outside" :
hit[0].dir == "Outside" ? "Inside" :
hit[0].dir == "N" ? "S" :
hit[0].dir == "S" ? "N" :
hit[0].dir == "W" ? "E" :
hit[0].dir == "E" ? "W" :
hit[0].dir == "NE" ? "SW" :
hit[0].dir == "SW" ? "NE" :
hit[0].dir == "SE" ? "NW" :
hit[0].dir == "NW" ? "SE" :
undefined;
}
return hit || [];
}
CollisionCoords.prototype._overlaps = function(other, inclusive) {
var c1 = other;
var c2 = this;
if (!inclusive) inclusive = false;
var ax = c1.centerx();
var ay = c1.centery();
// nine points to check whether they're in e2: e1's four corners, e1's center-sides, and e1's center
// if center of e1 is within e2, there's some kind of total inclusion
var points = [[c1.x1, c1.y1, "SE"], [c1.x2, c1.y1, "SW"], [c1.x2, c1.y2, "NW"], [c1.x1, c1.y2, "NE"], [ax, c1.y1, "S"], [c1.x2, ay, "W"], [ax, c1.y2, "N"], [c1.x1, ay, "E"], [ax, ay, undefined]];
var hit = null;
var dirs = { NW: false, N: false, NE: false, E: false, SE: false, S: false, SW: false, W: false };
for (var i = 0; i < points.length; i++) {
if (this.containsPoint(points[i][0], points[i][1], inclusive)) {
if (points[i][2]) dirs[points[i][2]] = true;
if (hit) continue; // don't need to make another one - it'll be the same anyways //
hit = [new CollisionCoords({ x1: Math.max(c1.x1, c2.x1), y1: Math.max(c1.y1, c2.y1),
x2: Math.min(c1.x2, c2.x2), y2: Math.min(c1.y2, c2.y2), dir: points[i][2]
})];
}
}
if (hit) {
if (dirs["NW"] && dirs["NE"]) hit[0].dir = "N";
if (dirs["NE"] && dirs["SE"]) hit[0].dir = "E";
if (dirs["SE"] && dirs["SW"]) hit[0].dir = "S";
if (dirs["SW"] && dirs["NW"]) hit[0].dir = "W";
if (dirs["NW"] && dirs["NE"] &&
dirs["SE"] && dirs["SW"]) hit[0].dir = "Outside";
if (!dirs["NW"] && !dirs["NE"] &&
!dirs["SE"] && !dirs["SW"] &&
!dirs["N"] && !dirs["E"] &&
!dirs["S"] && !dirs["W"]) hit[0].dir = "Inside";
}
return hit || [];
};
CollisionCoords.prototype._protrusion = function(area, dir, list) {
var o = this.overlaps(new CollisionCoords(area), false);
if (o.length <= 0) return list;
o[0].dir = dir;
list.push(o[0
|
{
clone.x1 += parseInt(this.proto.css("margin-left")) || 0;
clone.x1 += parseInt(this.proto.css("border-left")) || 0;
clone.x1 += parseInt(this.proto.css("padding-left")) || 0;
clone.x2 -= parseInt(this.proto.css("padding-right")) || 0;
clone.x2 -= parseInt(this.proto.css("border-right")) || 0;
clone.x2 -= parseInt(this.proto.css("margin-right")) || 0;
clone.y1 += parseInt(this.proto.css("margin-top")) || 0;
clone.y1 += parseInt(this.proto.css("border-top")) || 0;
clone.y1 += parseInt(this.proto.css("padding-top")) || 0;
clone.y2 -= parseInt(this.proto.css("padding-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("border-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("margin-bottom")) || 0;
}
|
conditional_block
|
jquery-collision.js
|
) +
(parseInt(proto.css("padding-right")) || 0) + (parseInt(proto.css("border-right")) || 0) + (parseInt(proto.css("margin-right")) || 0);
this.y2 += this.y1;
this.y2 += (parseInt(proto.css("margin-top")) || 0) + (parseInt(proto.css("border-top")) || 0) + (parseInt(proto.css("padding-top")) || 0) +
(parseInt(proto.css("padding-bottom")) || 0) + (parseInt(proto.css("border-bottom")) || 0) + (parseInt(proto.css("margin-bottom")) || 0);
}
else {
var o = proto.offset();
this.x1 = o.left - (parseInt(proto.css("margin-left")) || 0); // not also border -- offset starts from inside margin but outside border
this.y1 = o.top - (parseInt(proto.css("margin-top")) || 0); // not also border -- offset starts from inside margin but outside border
this.x2 = this.x1 + proto.outerWidth(true);
this.y2 = this.y1 + proto.outerHeight(true);
}
this.proto = proto;
}
else if ("x1" in proto) {
// used to effectively "clone"
this.x1 = proto.x1;
this.y1 = proto.y1;
this.x2 = proto.x2;
this.y2 = proto.y2;
this.proto = proto;
}
if ("dir" in proto) {
this.dir = proto.dir;
}
}
CollisionCoords.prototype.innerContainer = function() {
var clone = new CollisionCoords(this);
if (this.proto["css"]) {
clone.x1 += parseInt(this.proto.css("margin-left")) || 0;
clone.x1 += parseInt(this.proto.css("border-left")) || 0;
clone.x1 += parseInt(this.proto.css("padding-left")) || 0;
clone.x2 -= parseInt(this.proto.css("padding-right")) || 0;
clone.x2 -= parseInt(this.proto.css("border-right")) || 0;
clone.x2 -= parseInt(this.proto.css("margin-right")) || 0;
clone.y1 += parseInt(this.proto.css("margin-top")) || 0;
clone.y1 += parseInt(this.proto.css("border-top")) || 0;
clone.y1 += parseInt(this.proto.css("padding-top")) || 0;
clone.y2 -= parseInt(this.proto.css("padding-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("border-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("margin-bottom")) || 0;
}
return clone;
}
CollisionCoords.prototype.move = function(dx, dy) {
this.x1 += dx;
this.x2 += dx;
this.y1 += dy;
this.y2 += dy;
return this;
};
CollisionCoords.prototype.update = function(obj) {
if ("x1" in obj) this.x1 = obj["x1"];
if ("x2" in obj) this.x1 = obj["x2"];
if ("y1" in obj) this.x1 = obj["y1"];
if ("y2" in obj) this.x1 = obj["y2"];
if ("left" in obj) {
var w = this.x2 - this.x1;
this.x1 = obj["left"];
this.x2 = this.x1 + w;
}
if ("top" in obj) {
var h = this.y2 - this.y1;
this.y1 = obj["top"];
this.y2 = this.y1 + h;
}
if ("offset" in obj) {
var o = obj.offset();
this.update(o);
this.x2 = this.x1 + obj.width();
this.y2 = this.y1 + obj.height();
}
if ("dir" in obj) this.x1 = obj["dir"];
return this;
};
CollisionCoords.prototype.width = function() { return (this.x2 - this.x1); };
CollisionCoords.prototype.height = function() { return (this.y2 - this.y1); };
CollisionCoords.prototype.centerx = function() { return (this.x1 + this.x2) / 2; };
CollisionCoords.prototype.centery = function() { return (this.y1 + this.y2) / 2; };
CollisionCoords.prototype.toString = function() {
return (this.proto["get"] ? "#" + this.proto.get(0).id : "") + "[" + [this.x1, this.y1, this.x2, this.y2].join(",") + "]";
};
// the big mistake in a lot of collision-detectors,
// make floating-point arithmetic work for you, not against you:
CollisionCoords.EPSILON = 0.001;
CollisionCoords.prototype.containsPoint = function(x, y, inclusive) {
if (!inclusive) inclusive = false;
var epsilon = (inclusive ? -1 : +1) * CollisionCoords.EPSILON;
if ((x > (this.x1 + epsilon) && x < (this.x2 - epsilon)) &&
(y > (this.y1 + epsilon) && y < (this.y2 - epsilon)))
return true;
else
return false;
};
CollisionCoords.prototype.overlaps = function(other, inclusive) {
var hit = this._overlaps(other, inclusive);
if (hit.length > 0) return hit;
hit = other._overlaps(this, inclusive);
if (hit.length > 0) {
hit[0].dir = hit[0].dir == "Inside" ? "Outside" :
hit[0].dir == "Outside" ? "Inside" :
hit[0].dir == "N" ? "S" :
hit[0].dir == "S" ? "N" :
hit[0].dir == "W" ? "E" :
hit[0].dir == "E" ? "W" :
hit[0].dir == "NE" ? "SW" :
hit[0].dir == "SW" ? "NE" :
hit[0].dir == "SE" ? "NW" :
hit[0].dir == "NW" ? "SE" :
undefined;
}
return hit || [];
}
CollisionCoords.prototype._overlaps = function(other, inclusive) {
var c1 = other;
var c2 = this;
if (!inclusive) inclusive = false;
var ax = c1.centerx();
var ay = c1.centery();
// nine points to check whether they're in e2: e1's four corners, e1's center-sides, and e1's center
// if center of e1 is within e2, there's some kind of total inclusion
var points = [[c1.x1, c1.y1, "SE"], [c1.x2, c1.y1, "SW"], [c1.x2, c1.y2, "NW"], [c1.x1, c1.y2, "NE"], [ax, c1.y1, "S"], [c1.x2, ay, "W"], [ax, c1.y2, "N"], [c1.x1, ay, "E"], [ax, ay, undefined]];
var hit = null;
var dirs = { NW: false, N: false, NE: false, E: false, SE: false, S: false, SW: false, W: false };
for (var i = 0; i < points.length; i++) {
if (this.containsPoint(points[i][0], points[i][1], inclusive)) {
if (points[i][2]) dirs[points[i][2]] = true;
if (hit) continue; // don't need to make another one - it'll be the same anyways //
hit = [new CollisionCoords({ x1: Math.max(c1.x1, c2.x1), y1: Math.max(c1.y1, c2.y1),
x2: Math.min(c1.x2, c2.x2), y2: Math.min(c1.y2, c2.y2), dir: points[i][2]
|
})];
}
}
if (hit) {
if (dirs["NW"] && dirs["NE"]) hit[0].dir = "N";
if (dirs["NE"] && dirs["SE"]) hit[0].dir = "E";
if (dirs["SE"] && dirs["SW"]) hit[0].dir = "S";
if (dirs["SW"] && dirs["NW"]) hit[0].dir = "W";
if (dirs["NW"] && dirs["NE"] &&
dirs["SE"] && dirs["SW"]) hit[0].dir = "Outside";
if (!dirs["NW"] && !dirs["NE"] &&
!dirs["SE"] && !dirs["SW"] &&
!dirs["N"] && !dirs["E"] &&
!dirs["S"] && !dirs["W"]) hit[0].dir = "Inside";
}
return hit || [];
};
CollisionCoords.prototype._protrusion = function(area, dir, list) {
var o = this.overlaps(new CollisionCoords(area), false);
if (o.length <= 0) return list;
o[0].dir = dir;
list.push(o[0]);
|
random_line_split
|
|
jquery-collision.js
|
clone.y1 += parseInt(this.proto.css("border-top")) || 0;
clone.y1 += parseInt(this.proto.css("padding-top")) || 0;
clone.y2 -= parseInt(this.proto.css("padding-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("border-bottom")) || 0;
clone.y2 -= parseInt(this.proto.css("margin-bottom")) || 0;
}
return clone;
}
CollisionCoords.prototype.move = function(dx, dy) {
this.x1 += dx;
this.x2 += dx;
this.y1 += dy;
this.y2 += dy;
return this;
};
CollisionCoords.prototype.update = function(obj) {
if ("x1" in obj) this.x1 = obj["x1"];
if ("x2" in obj) this.x1 = obj["x2"];
if ("y1" in obj) this.x1 = obj["y1"];
if ("y2" in obj) this.x1 = obj["y2"];
if ("left" in obj) {
var w = this.x2 - this.x1;
this.x1 = obj["left"];
this.x2 = this.x1 + w;
}
if ("top" in obj) {
var h = this.y2 - this.y1;
this.y1 = obj["top"];
this.y2 = this.y1 + h;
}
if ("offset" in obj) {
var o = obj.offset();
this.update(o);
this.x2 = this.x1 + obj.width();
this.y2 = this.y1 + obj.height();
}
if ("dir" in obj) this.x1 = obj["dir"];
return this;
};
CollisionCoords.prototype.width = function() { return (this.x2 - this.x1); };
CollisionCoords.prototype.height = function() { return (this.y2 - this.y1); };
CollisionCoords.prototype.centerx = function() { return (this.x1 + this.x2) / 2; };
CollisionCoords.prototype.centery = function() { return (this.y1 + this.y2) / 2; };
CollisionCoords.prototype.toString = function() {
return (this.proto["get"] ? "#" + this.proto.get(0).id : "") + "[" + [this.x1, this.y1, this.x2, this.y2].join(",") + "]";
};
// the big mistake in a lot of collision-detectors,
// make floating-point arithmetic work for you, not against you:
CollisionCoords.EPSILON = 0.001;
CollisionCoords.prototype.containsPoint = function(x, y, inclusive) {
if (!inclusive) inclusive = false;
var epsilon = (inclusive ? -1 : +1) * CollisionCoords.EPSILON;
if ((x > (this.x1 + epsilon) && x < (this.x2 - epsilon)) &&
(y > (this.y1 + epsilon) && y < (this.y2 - epsilon)))
return true;
else
return false;
};
CollisionCoords.prototype.overlaps = function(other, inclusive) {
var hit = this._overlaps(other, inclusive);
if (hit.length > 0) return hit;
hit = other._overlaps(this, inclusive);
if (hit.length > 0) {
hit[0].dir = hit[0].dir == "Inside" ? "Outside" :
hit[0].dir == "Outside" ? "Inside" :
hit[0].dir == "N" ? "S" :
hit[0].dir == "S" ? "N" :
hit[0].dir == "W" ? "E" :
hit[0].dir == "E" ? "W" :
hit[0].dir == "NE" ? "SW" :
hit[0].dir == "SW" ? "NE" :
hit[0].dir == "SE" ? "NW" :
hit[0].dir == "NW" ? "SE" :
undefined;
}
return hit || [];
}
CollisionCoords.prototype._overlaps = function(other, inclusive) {
var c1 = other;
var c2 = this;
if (!inclusive) inclusive = false;
var ax = c1.centerx();
var ay = c1.centery();
// nine points to check whether they're in e2: e1's four corners, e1's center-sides, and e1's center
// if center of e1 is within e2, there's some kind of total inclusion
var points = [[c1.x1, c1.y1, "SE"], [c1.x2, c1.y1, "SW"], [c1.x2, c1.y2, "NW"], [c1.x1, c1.y2, "NE"], [ax, c1.y1, "S"], [c1.x2, ay, "W"], [ax, c1.y2, "N"], [c1.x1, ay, "E"], [ax, ay, undefined]];
var hit = null;
var dirs = { NW: false, N: false, NE: false, E: false, SE: false, S: false, SW: false, W: false };
for (var i = 0; i < points.length; i++) {
if (this.containsPoint(points[i][0], points[i][1], inclusive)) {
if (points[i][2]) dirs[points[i][2]] = true;
if (hit) continue; // don't need to make another one - it'll be the same anyways //
hit = [new CollisionCoords({ x1: Math.max(c1.x1, c2.x1), y1: Math.max(c1.y1, c2.y1),
x2: Math.min(c1.x2, c2.x2), y2: Math.min(c1.y2, c2.y2), dir: points[i][2]
})];
}
}
if (hit) {
if (dirs["NW"] && dirs["NE"]) hit[0].dir = "N";
if (dirs["NE"] && dirs["SE"]) hit[0].dir = "E";
if (dirs["SE"] && dirs["SW"]) hit[0].dir = "S";
if (dirs["SW"] && dirs["NW"]) hit[0].dir = "W";
if (dirs["NW"] && dirs["NE"] &&
dirs["SE"] && dirs["SW"]) hit[0].dir = "Outside";
if (!dirs["NW"] && !dirs["NE"] &&
!dirs["SE"] && !dirs["SW"] &&
!dirs["N"] && !dirs["E"] &&
!dirs["S"] && !dirs["W"]) hit[0].dir = "Inside";
}
return hit || [];
};
CollisionCoords.prototype._protrusion = function(area, dir, list) {
var o = this.overlaps(new CollisionCoords(area), false);
if (o.length <= 0) return list;
o[0].dir = dir;
list.push(o[0]);
return list;
};
CollisionCoords.prototype.protrusions = function(container) {
var list = [];
var n = Number.NEGATIVE_INFINITY;
var p = Number.POSITIVE_INFINITY;
var l = container.x1;
var r = container.x2;
var t = container.y1;
var b = container.y2;
list = this._protrusion({ x1: l, y1: n, x2: r, y2: t }, "N", list);
list = this._protrusion({ x1: r, y1: n, x2: p, y2: t }, "NE", list);
list = this._protrusion({ x1: r, y1: t, x2: p, y2: b }, "E", list);
list = this._protrusion({ x1: r, y1: b, x2: p, y2: p }, "SE", list);
list = this._protrusion({ x1: l, y1: b, x2: r, y2: p }, "S", list);
list = this._protrusion({ x1: n, y1: b, x2: l, y2: p }, "SW", list);
list = this._protrusion({ x1: n, y1: t, x2: l, y2: b }, "W", list);
list = this._protrusion({ x1: n, y1: n, x2: l, y2: t }, "NW", list);
return list;
};
function Collision(targetNode, obstacleNode, overlapCoords, overlapType) {
this.target = targetNode;
this.obstacle = obstacleNode;
this.overlap = overlapCoords;
this.overlapType = overlapType;
}
Collision.prototype.distance = function(other) {
var tc = this.target;
var oc = this.overlap;
return Math.sqrt((tc.centerx() - oc.centerx()) * (tc.centerx() - oc.centerx()) +
(tc.centery() - oc.centery()) * (tc.centery() - oc.centery()));
}
function
|
CollisionFactory
|
identifier_name
|
|
regex.py
|
import re
'''
search(r'pattern', text): search for pattern in text. The 'r' prefix marks a raw string, which passes backslashes through unchanged and is very handy for regular expressions.
Importance of the r flag:
in particular, \b matches the empty string specifically at the start and end of a word.
re expects the string \b; however, under normal string interpretation '\b' is converted to the ASCII backspace character,
so you need to either explicitly escape the backslash ('\\b'), or tell python it is a raw string (r'\b').
'''
re.findall('\b', 'test') # without the r flag, the backslash gets consumed by the Python string interpreter and '\b' becomes the ASCII backspace character; the re module gets a backspace.
#[]
re.findall('\\b', 'test') # backslash is explicitly escaped and is passed through to re module
#['', '']
re.findall(r'\b', 'test') # often this syntax is easier
#['', '']
'''
Search for pattern 'iii' in string 'piiig'.
On success, result.group() is the matched text.
# result[0] - the whole match
# result[1] - first group
# result[2] - second group and so on
'''
result = re.search(r'iii', 'piiig') # found, result.group() == "iii"
result = re.search(r'igs', 'piiig') # not found, result == None
if result != None:
result[0]
'''
\b Returns a match where the specified characters are at the beginning or at the end of a word
(the "r" in the beginning is making sure that the string is being treated as a "raw string")
r"\bain"
r"ain\b"
'''
result = re.search(r'\bain', 'it is aining asas') # found: 'ain'
result[0]
#if r flag is not used, \b is treated as a backspace
result = re.search('\bain', 'it is aining') # not found
## . = any single char but \n, so ... means 3 chars must match
result = re.search(r'..g', 'p1kgx') # found, result.group() == "1kg"
## \d = digit char,
# \w = alphanumeric and _ [a-zA-Z0-9_]
# In the example below, 3 digits and 3 word chars must match
result = re.search(r'\d\d\d', 'p123g') # found, result.group() == "123"
result = re.search(r'\w\w\w', '@@ab_1d!!') # found, result.group() == "ab_"
type(result)
result[0]
''' Repetition
Things get more interesting when you use + and * to specify repetition in the pattern
+ -- 1 or more occurrences of the pattern to its left, e.g. 'i+' = one or more i's
* -- 0 or more occurrences of the pattern to its left
? -- match 0 or 1 occurrences of the pattern to its left
Leftmost & Largest
'''
## i+ = one or more i's, as many as possible.
result = re.search(r'pi+', 'piiig') # found, result.group() == "piii"
## Finds the first/leftmost solution, and within it drives the +
## as far as possible (aka 'leftmost and largest').
## In this example, note that it does not get to the second set of i's.
result = re.search(r'i+', 'piigiiii') # found, result.group() == "ii"
## \s* = zero or more whitespace chars
## Here look for 3 digits, possibly separated by whitespace.
result = re.search(r'\d\s*\d\s*\d', 'xx1 2 3xx') # found, result.group() == "1 2 3"
result = re.search(r'\d\s*\d\s*\d', 'xx12 3xx') # found, result.group() == "12 3"
result = re.search(r'\d\s*\d\s*\d', 'xx123xx') # found, result.group() == "123"
## ^ = matches the start of string, so this fails:
result = re.search(r'^b\w+', 'foobar') # not found, result == None
## but without the ^ it succeeds:
result = re.search(r'b\w+', 'foobar') # found, result.group() == "bar"
## ? = 0 or 1 occurrence
result = re.search(r'ssa?', 'ssa') # found, result.group() == "ssa"
result = re.search(r'ssa?', 'sdf') # not found
result = re.search(r'ssa?', 'ssdf') # found, result.group() == "ss"
#escape a special char e.g. \.
# @ does not need to be escaped. However, if it is escaped, it makes no difference and the result is the same
#square brackets
'''
Square brackets can be used to indicate a set of chars, so [abc] matches 'a' or 'b' or 'c'.
The codes \w, \s etc. work inside square brackets too.
In sets, +, *, ., |, (), $, {} have no special meaning, so
[+] means: return a match for any '+' character in the string
dot (.) just means a literal dot.
For the emails problem, the square brackets are an easy way to add '.' and '-' to the set of chars which can appear around the @ with the pattern r'[\w.-]+@[\w.-]+'
'''
result = re.search(r'[\w.-]+\@1[2-9]+', 'x@1122') # not found
result = re.search(r'[\w.-]+\@1[2-9]+', 'x@122') # found: x@122
result = re.search(r'[\w.-]+\@[\w-]+\.[\w]+', 'mukesh_khattar.k@swd-edc.com') # found: mukesh_khattar.k@swd-edc.com
# Inside a set, ^ means exclude. In normal use (r'^str') it means starting with
# example - [^arn] Returns a match for any character EXCEPT a, r, and n
result = re.search(r'[^arn]', 'rit is aining')
result[0] # i
# escape [] if it is part of the pattern, e.g. if we need '[process id]' in the line below
line ='sqxwc wecwec[12121] xwcwecc'
result=re.search(r'\[\d+\]',line)
# print the result using one of the following
result.group(0) # '[12121]'
result[0] # '[12121]'
# in the above, if just the process id is needed
line ='sqxwc wecwec[12121] xwcwecc'
result=re.search(r'\[(\d+)\]',line)
result[0] # '[12121]'
result[1] # '12121'
# [a-zA-Z] - one char of a-z or A-Z
# ^ ; start of string
result = re.search(r'^[a-zA-Z][\w._-]+\@[\w_-]+\.[\w]+', 'mukesh_khattar.k@swd-edc.com') # found: mukesh_khattar.k@swd-edc.com
result = re.search(r'^[a-zA-Z][\w._-]+\@[\w_-]+\.[\w]+', '1mukesh_khattar.k@swd-edc.com') # not found
# '-' needs to be escaped in a set (unless it is the first or last char). + and . lose their special meaning when used inside a set
pattern = '[\w\-+.]+\.[a-zA-Z]+'
result=re.search(pattern, 'a_b-c+d.wdwd.com')
result[0]
# the hour is between 1 and 12, with no leading zero, followed by a colon, then minutes between 00 and 59, then an optional space, and then AM or PM, in upper or lower case.
pattern = '([1]?[0-9]):([0-5][0-9])( ?)[AaPp][Mm]'
result = re.search(pattern, '2:29 PM')
result[0]
result[1]
result[2]
result[3]
result[4] # error
# For an OR condition use |. E.g. the first char needs to be an uppercase letter or a digit AND two or more chars total AND surrounded by ()
pattern = '\(([A-Z]|[0-9])[\w]+\)'
result = re.search(pattern, "wdwd(1aM)wdw") # True
result = re.search(pattern, "wdwd(AM)wdw") # True
result = re.search(pattern, "wdwd(aswd)wdw") # False
# ^- start of string , $ - end of string
#the text passed qualifies as a top-level web address, meaning that it contains alphanumeric characters (which includes
# letters, numbers, and underscores), as well as periods, dashes, and a plus sign,
# followed by a period and a character-only top-level domain such as ".com", ".info", ".edu", etc.
pattern = '^([\w\-+\.]+
|
'''
|
random_line_split
|
|
regex.py
|
Largest
'''
## i+ = one or more i's, as many as possible.
result = re.search(r'pi+', 'piiig') # found, result.group() == "piii"
## Finds the first/leftmost solution, and within it drives the +
## as far as possible (aka 'leftmost and largest').
## In this example, note that it does not get to the second set of i's.
result = re.search(r'i+', 'piigiiii') # found, result.group() == "ii"
## \s* = zero or more whitespace chars
## Here look for 3 digits, possibly separated by whitespace.
result = re.search(r'\d\s*\d\s*\d', 'xx1 2 3xx') # found, result.group() == "1 2 3"
result = re.search(r'\d\s*\d\s*\d', 'xx12 3xx') # found, result.group() == "12 3"
result = re.search(r'\d\s*\d\s*\d', 'xx123xx') # found, result.group() == "123"
## ^ = matches the start of string, so this fails:
result = re.search(r'^b\w+', 'foobar') # not found, result == None
## but without the ^ it succeeds:
result = re.search(r'b\w+', 'foobar') # found, result.group() == "bar"
## ? = 0 or 1 occurrence
result = re.search(r'ssa?', 'ssa') # found, result.group() == "ssa"
result = re.search(r'ssa?', 'sdf') # not found
result = re.search(r'ssa?', 'ssdf') # found, result.group() == "ss"
#escape a special char e.g. \.
# @ does not need to be escaped. However, if it is escaped, it makes no difference and the result is the same
#square brackets
'''
Square brackets can be used to indicate a set of chars, so [abc] matches 'a' or 'b' or 'c'.
The codes \w, \s etc. work inside square brackets too.
In sets, +, *, ., |, (), $, {} have no special meaning, so
[+] means: return a match for any '+' character in the string
dot (.) just means a literal dot.
For the emails problem, the square brackets are an easy way to add '.' and '-' to the set of chars which can appear around the @ with the pattern r'[\w.-]+@[\w.-]+'
'''
result = re.search(r'[\w.-]+\@1[2-9]+', 'x@1122') # not found
result = re.search(r'[\w.-]+\@1[2-9]+', 'x@122') # found: x@122
result = re.search(r'[\w.-]+\@[\w-]+\.[\w]+', 'mukesh_khattar.k@swd-edc.com') # found: mukesh_khattar.k@swd-edc.com
# Inside a set, ^ means exclude. In normal use (r'^str') it means starting with
# example - [^arn] Returns a match for any character EXCEPT a, r, and n
result = re.search(r'[^arn]', 'rit is aining')
result[0] # i
# escape [] if it is part of the pattern, e.g. if we need '[process id]' in the line below
line ='sqxwc wecwec[12121] xwcwecc'
result=re.search(r'\[\d+\]',line)
# print the result using one of the following
result.group(0) # '[12121]'
result[0] # '[12121]'
# in the above, if just the process id is needed
line ='sqxwc wecwec[12121] xwcwecc'
result=re.search(r'\[(\d+)\]',line)
result[0] # '[12121]'
result[1] # '12121'
# [a-zA-Z] - one char of a-z or A-Z
# ^ ; start of string
result = re.search(r'^[a-zA-Z][\w._-]+\@[\w_-]+\.[\w]+', 'mukesh_khattar.k@swd-edc.com') # found: mukesh_khattar.k@swd-edc.com
result = re.search(r'^[a-zA-Z][\w._-]+\@[\w_-]+\.[\w]+', '1mukesh_khattar.k@swd-edc.com') # not found
# '-' needs to be escaped in a set (unless it is the first or last char). + and . lose their special meaning when used inside a set
pattern = '[\w\-+.]+\.[a-zA-Z]+'
result=re.search(pattern, 'a_b-c+d.wdwd.com')
result[0]
# the hour is between 1 and 12, with no leading zero, followed by a colon, then minutes between 00 and 59, then an optional space, and then AM or PM, in upper or lower case.
pattern = '([1]?[0-9]):([0-5][0-9])( ?)[AaPp][Mm]'
result = re.search(pattern, '2:29 PM')
result[0]
result[1]
result[2]
result[3]
result[4] # error
# For an OR condition use |. E.g. the first char needs to be an uppercase letter or a digit AND two or more chars total AND surrounded by ()
pattern = '\(([A-Z]|[0-9])[\w]+\)'
result = re.search(pattern, "wdwd(1aM)wdw") # True
result = re.search(pattern, "wdwd(AM)wdw") # True
result = re.search(pattern, "wdwd(aswd)wdw") # False
# ^- start of string , $ - end of string
#the text passed qualifies as a top-level web address, meaning that it contains alphanumeric characters (which includes
# letters, numbers, and underscores), as well as periods, dashes, and a plus sign,
# followed by a period and a character-only top-level domain such as ".com", ".info", ".edu", etc.
pattern = '^([\w\-+\.]+)\/([a-zA-Z]+)$'
result = re.search(pattern, "web-addres.com/homepage") # True
result[0]
'''
Group Extraction
Group the match, e.g. for an email address: (username_pattern)@(host_pattern)
The "group" feature of a regular expression allows you to pick out parts of the matching text.
'''
str = 'purple alice-b@google.com monkey dishwasher'
result = re.search(r'([\w.-]+)@([\w.-]+)', str)
result[0] ## 'alice-b@google.com' (the whole match)
#extract groups, eq:
result[1] #'alice-b' (the username, group 1)
# eq:
result[2] ## 'google.com' (the host, group 2)
# pattern to find 'lastname, firstname', where firstname may include spaces and a middle initial
# '-' does not need to be escaped if it is the first or last char in a set
name = 'last-name, firstname M.'
result = re.search(r'^([\w\s-]+), ([\w\s\.]+)$', name)
result[0]
result[1]
result[2]
# exact specified number of instances
zip='here it is 12121'
result = re.search(r'\d{5}', zip)
result[0] # '12121'
zip='as 1212 ss'
result = re.search(r'\d{5}', zip)
result[0] # error - result is None (no match)
# exactly 5 digits, and sometimes, but not always, followed by a dash with 4 more digits.
# The zip code needs to be preceded by at least one space, and cannot be at the start of the text.
#pattern = '(\w)+(\s)+[0-9][0-9][0-9][0-9][0-9](([-][0-9][0-9][0-9][0-9])|(\s))'
pattern = '(\w)+(\s)+[0-9]{5}(([-][0-9]{4})|(\s))'
result = re.search(pattern, "a 21212-0991 wdw") # True
result[0]
result = re.search(pattern, "a 21212 wdw") # True
result[0]
result = re.search(pattern, "a 2122 wdw") # False
if result:
result[0]
'''
findall() is probably the single most powerful function in the re module.
Above we used re.search() to find the first match for a pattern.
findall() finds *all* the matches and returns them as a list of strings, with each string representing one match.
'''
s = 'purple alice@google.com, blah monkey bob@abc.com blah dishwasher'
## Here re.findall() returns a list of all the found email strings
emails = re.findall(r'[\w\.-]+@[\w\.-]+', s) ## ['alice@google.com', 'bob@abc.com']
for email in emails:
# do something with each found email string
|
print(email)
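# Sketch (assumed follow-up): when the pattern contains groups, findall() returns
# a list of tuples, one tuple of group values per match.
text = 'purple alice@google.com, blah monkey bob@abc.com blah dishwasher'
re.findall(r'([\w\.-]+)@([\w\.-]+)', text) # [('alice', 'google.com'), ('bob', 'abc.com')]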
|
conditional_block
|
|
eth.rs
|
read-only, so svd2rust doesn't generate bindings to
// modify them. Instead, as a workaround, we manually manipulate the
// bits
eth_mac
.mmc_tx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mac
.mmc_rx_interrupt_mask
.modify(|r, w| w.bits(r.bits() | (1 << 27)));
eth_mtl.mtlrx_qomr.modify(|_, w| {
w
// Receive store and forward
.rsf()
.set_bit()
// Disable dropping of TCP/IP checksum error frames
.dis_tcp_ef()
.clear_bit()
// Forward error frames
.fep()
.clear_bit()
// Forward undersized good packets
.fup()
.clear_bit()
});
eth_mtl.mtltx_qomr.modify(|_, w| {
w
// Transmit store and forward
.tsf()
.set_bit()
});
// operation mode register
eth_dma.dmamr.modify(|_, w| {
w.intm()
.bits(0b00)
// Rx Tx priority ratio 1:1
.pr()
.bits(0b000)
.txpr()
.clear_bit()
.da()
.clear_bit()
});
// bus mode register
eth_dma.dmasbmr.modify(|_, w| {
// Address-aligned beats
w.aal()
.set_bit()
// Fixed burst
.fb()
.set_bit()
});
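// Channel control: descriptor skip length 0, PBL values not multiplied by 8,
// TCP segmentation maximum segment size of 536 bytes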
eth_dma
.dmaccr
.modify(|_, w| w.dsl().bits(0).pblx8().clear_bit().mss().bits(536));
eth_dma.dmactx_cr.modify(|_, w| {
w
// Tx DMA PBL
.txpbl()
.bits(32)
.tse()
.clear_bit()
// Operate on second frame
.osf()
.clear_bit()
});
eth_dma.dmacrx_cr.modify(|_, w| {
w
// receive buffer size
.rbsz()
.bits(ETH_BUF_SIZE as u16)
// Rx DMA PBL
.rxpbl()
.bits(32)
// Disable flushing of received frames
.rpf()
.clear_bit()
});
// Initialise DMA descriptors
ring.tx.init();
ring.rx.init();
// Ensure the DMA descriptors are committed
cortex_m::asm::dsb();
// Manage MAC transmission and reception
eth_mac.maccr.modify(|_, w| {
w.re()
.bit(true) // Receiver Enable
.te()
.bit(true) // Transmitter Enable
});
eth_mtl.mtltx_qomr.modify(|_, w| w.ftq().set_bit());
// Manage DMA transmission and reception
eth_dma.dmactx_cr.modify(|_, w| w.st().set_bit());
eth_dma.dmacrx_cr.modify(|_, w| w.sr().set_bit());
eth_dma
.dmacsr
.modify(|_, w| w.tps().set_bit().rps().set_bit());
});
// MAC layer
// Set the MDC clock frequency in the range 1MHz - 2.5MHz
let hclk_mhz = clocks.hclk().raw() / 1_000_000;
let csr_clock_range = match hclk_mhz {
0..=34 => 2, // Divide by 16
35..=59 => 3, // Divide by 26
60..=99 => 0, // Divide by 42
100..=149 => 1, // Divide by 62
150..=249 => 4, // Divide by 102
250..=310 => 5, // Divide by 124
_ => panic!(
"HCLK results in MDC clock > 2.5MHz even for the \
highest CSR clock divider"
),
};
let mac = EthernetMAC {
eth_mac,
eth_phy_addr: 0,
clock_range: csr_clock_range,
};
let dma = EthernetDMA { ring, eth_dma };
(dma, mac)
}
impl EthernetMAC {
/// Sets the SMI address to use for the PHY
pub fn set_phy_addr(self, eth_phy_addr: u8) -> Self {
Self {
eth_mac: self.eth_mac,
eth_phy_addr,
clock_range: self.clock_range,
}
}
}
/// PHY Operations
impl StationManagement for EthernetMAC {
/// Read a register over SMI.
fn smi_read(&mut self, reg: u8) -> u16 {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b11) // read
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdiodr.read().md().bits()
}
/// Write a register over SMI.
fn smi_write(&mut self, reg: u8, val: u16) {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac
.macmdiodr
.write(|w| unsafe { w.md().bits(val) });
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b01) // write
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
}
}
/// Define TxToken type and implement consume method
pub struct TxToken<'a, const TD: usize>(&'a mut TDesRing<TD>);
impl<'a, const TD: usize> phy::TxToken for TxToken<'a, TD> {
fn consume<R, F>(self, len: usize, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
assert!(len <= ETH_BUF_SIZE);
let result = f(unsafe { self.0.buf_as_slice_mut(len) });
self.0.release();
result
}
}
/// Define RxToken type and implement consume method
pub struct RxToken<'a, const RD: usize>(&'a mut RDesRing<RD>);
impl<'a, const RD: usize> phy::RxToken for RxToken<'a, RD> {
fn consume<R, F>(self, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
let result = f(unsafe { self.0.buf_as_slice_mut() });
self.0.release();
result
}
}
/// Implement the smoltcp Device interface
impl<const TD: usize, const RD: usize> phy::Device for EthernetDMA<TD, RD> {
type RxToken<'a> = RxToken<'a, RD>;
type TxToken<'a> = TxToken<'a, TD>;
// Clippy false positive because DeviceCapabilities is non-exhaustive
#[allow(clippy::field_reassign_with_default)]
fn capabilities(&self) -> DeviceCapabilities {
let mut caps = DeviceCapabilities::default();
// ethernet frame type II (6 smac, 6 dmac, 2 ethertype),
// sans CRC (4), 1500 IP MTU
caps.max_transmission_unit = 1514;
caps.max_burst_size = Some(core::cmp::min(TD, RD));
caps
}
fn receive(
&mut self,
_timestamp: Instant,
) -> Option<(RxToken<RD>, TxToken<TD>)> {
// Skip all queued packets with errors.
while self.ring.rx.available() && !self.ring.rx.valid() {
self.ring.rx.release()
}
if self.ring.rx.available() && self.ring.tx.available() {
Some((RxToken(&mut self.ring.rx), TxToken(&mut self.ring.tx)))
} else {
None
}
}
fn transmit(&mut self, _timestamp: Instant) -> Option<TxToken<TD>> {
if self.ring.tx.available() {
Some(TxToken(&mut self.ring.tx))
} else {
None
}
}
}
impl<const TD: usize, const RD: usize> EthernetDMA<TD, RD> {
/// Return the number of packets dropped since this method was
/// last called
pub fn
|
number_packets_dropped
|
identifier_name
|
|
eth.rs
|
frames disable
.dis_tcp_ef()
.clear_bit()
// Forward error frames
.fep()
.clear_bit()
// Forward undersized good packets
.fup()
.clear_bit()
});
eth_mtl.mtltx_qomr.modify(|_, w| {
w
// Transmit store and forward
.tsf()
.set_bit()
});
// operation mode register
eth_dma.dmamr.modify(|_, w| {
w.intm()
.bits(0b00)
// Rx Tx priority ratio 1:1
.pr()
.bits(0b000)
.txpr()
.clear_bit()
.da()
.clear_bit()
});
// bus mode register
eth_dma.dmasbmr.modify(|_, w| {
// Address-aligned beats
w.aal()
.set_bit()
// Fixed burst
.fb()
.set_bit()
});
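// Channel control: descriptor skip length 0, PBL values not multiplied by 8,
// TCP segmentation maximum segment size of 536 bytes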
eth_dma
.dmaccr
.modify(|_, w| w.dsl().bits(0).pblx8().clear_bit().mss().bits(536));
eth_dma.dmactx_cr.modify(|_, w| {
w
// Tx DMA PBL
.txpbl()
.bits(32)
.tse()
.clear_bit()
// Operate on second frame
.osf()
.clear_bit()
});
eth_dma.dmacrx_cr.modify(|_, w| {
w
// receive buffer size
.rbsz()
.bits(ETH_BUF_SIZE as u16)
// Rx DMA PBL
.rxpbl()
.bits(32)
// Disable flushing of received frames
.rpf()
.clear_bit()
});
// Initialise DMA descriptors
ring.tx.init();
ring.rx.init();
// Ensure the DMA descriptors are committed
cortex_m::asm::dsb();
// Manage MAC transmission and reception
eth_mac.maccr.modify(|_, w| {
w.re()
.bit(true) // Receiver Enable
.te()
.bit(true) // Transmitter Enable
});
eth_mtl.mtltx_qomr.modify(|_, w| w.ftq().set_bit());
// Manage DMA transmission and reception
eth_dma.dmactx_cr.modify(|_, w| w.st().set_bit());
eth_dma.dmacrx_cr.modify(|_, w| w.sr().set_bit());
eth_dma
.dmacsr
.modify(|_, w| w.tps().set_bit().rps().set_bit());
});
// MAC layer
// Set the MDC clock frequency in the range 1MHz - 2.5MHz
let hclk_mhz = clocks.hclk().raw() / 1_000_000;
let csr_clock_range = match hclk_mhz {
0..=34 => 2, // Divide by 16
35..=59 => 3, // Divide by 26
60..=99 => 0, // Divide by 42
100..=149 => 1, // Divide by 62
150..=249 => 4, // Divide by 102
250..=310 => 5, // Divide by 124
_ => panic!(
"HCLK results in MDC clock > 2.5MHz even for the \
highest CSR clock divider"
),
};
let mac = EthernetMAC {
eth_mac,
eth_phy_addr: 0,
clock_range: csr_clock_range,
};
let dma = EthernetDMA { ring, eth_dma };
(dma, mac)
}
impl EthernetMAC {
/// Sets the SMI address to use for the PHY
pub fn set_phy_addr(self, eth_phy_addr: u8) -> Self {
Self {
eth_mac: self.eth_mac,
eth_phy_addr,
clock_range: self.clock_range,
}
}
}
/// PHY Operations
impl StationManagement for EthernetMAC {
/// Read a register over SMI.
fn smi_read(&mut self, reg: u8) -> u16 {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b11) // read
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac.macmdiodr.read().md().bits()
}
/// Write a register over SMI.
fn smi_write(&mut self, reg: u8, val: u16) {
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
self.eth_mac
.macmdiodr
.write(|w| unsafe { w.md().bits(val) });
self.eth_mac.macmdioar.modify(|_, w| unsafe {
w.pa()
.bits(self.eth_phy_addr)
.rda()
.bits(reg)
.goc()
.bits(0b01) // write
.cr()
.bits(self.clock_range)
.mb()
.set_bit()
});
while self.eth_mac.macmdioar.read().mb().bit_is_set() {}
}
}
/// Define TxToken type and implement consume method
pub struct TxToken<'a, const TD: usize>(&'a mut TDesRing<TD>);
impl<'a, const TD: usize> phy::TxToken for TxToken<'a, TD> {
fn consume<R, F>(self, len: usize, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
assert!(len <= ETH_BUF_SIZE);
let result = f(unsafe { self.0.buf_as_slice_mut(len) });
self.0.release();
result
}
}
/// Define RxToken type and implement consume method
pub struct RxToken<'a, const RD: usize>(&'a mut RDesRing<RD>);
impl<'a, const RD: usize> phy::RxToken for RxToken<'a, RD> {
fn consume<R, F>(self, f: F) -> R
where
F: FnOnce(&mut [u8]) -> R,
{
let result = f(unsafe { self.0.buf_as_slice_mut() });
self.0.release();
result
}
}
/// Implement the smoltcp Device interface
impl<const TD: usize, const RD: usize> phy::Device for EthernetDMA<TD, RD> {
type RxToken<'a> = RxToken<'a, RD>;
type TxToken<'a> = TxToken<'a, TD>;
// Clippy false positive because DeviceCapabilities is non-exhaustive
#[allow(clippy::field_reassign_with_default)]
fn capabilities(&self) -> DeviceCapabilities {
let mut caps = DeviceCapabilities::default();
// ethernet frame type II (6 smac, 6 dmac, 2 ethertype),
// sans CRC (4), 1500 IP MTU
caps.max_transmission_unit = 1514;
caps.max_burst_size = Some(core::cmp::min(TD, RD));
caps
}
fn receive(
&mut self,
_timestamp: Instant,
) -> Option<(RxToken<RD>, TxToken<TD>)> {
// Skip all queued packets with errors.
while self.ring.rx.available() && !self.ring.rx.valid() {
self.ring.rx.release()
}
if self.ring.rx.available() && self.ring.tx.available() {
Some((RxToken(&mut self.ring.rx), TxToken(&mut self.ring.tx)))
} else {
None
}
}
fn transmit(&mut self, _timestamp: Instant) -> Option<TxToken<TD>> {
if self.ring.tx.available() {
Some(TxToken(&mut self.ring.tx))
} else {
None
}
}
}
impl<const TD: usize, const RD: usize> EthernetDMA<TD, RD> {
/// Return the number of packets dropped since this method was
/// last called
pub fn number_packets_dropped(&self) -> u32 {
self.eth_dma.dmacmfcr.read().mfc().bits() as u32
}
}
/// Clears the Ethernet interrupt flag
///
/// # Safety
///
/// This method implements a single register write to DMACSR
pub unsafe fn interrupt_handler()
|
{
let eth_dma = &*stm32::ETHERNET_DMA::ptr();
eth_dma
.dmacsr
.write(|w| w.nis().set_bit().ri().set_bit().ti().set_bit());
let _ = eth_dma.dmacsr.read();
let _ = eth_dma.dmacsr.read(); // Delay 2 peripheral clocks
}
|
identifier_body
|
|
eth.rs
|
DMA engine ownership
// Ensure changes to the descriptor are committed before
// DMA engine sees tail pointer store
cortex_m::asm::dsb();
// Move the tail pointer (TPR) to the next descriptor
let x = (x + 1) % TD;
unsafe {
let dma = &*stm32::ETHERNET_DMA::ptr();
dma.dmactx_dtpr
.write(|w| w.bits(&(self.td[x]) as *const _ as u32));
}
self.tdidx = x;
}
/// Access the buffer pointed to by the next TDes
pub unsafe fn buf_as_slice_mut(&mut self, length: usize) -> &mut [u8] {
let x = self.tdidx;
// Set address in descriptor
self.td[x].tdes0 = ptr::addr_of!(self.tbuf[x]) as u32; // Buffer 1
// Set length in descriptor
let len = core::cmp::min(length, ETH_BUF_SIZE);
self.td[x].tdes2 = (length as u32) & EMAC_TDES2_B1L;
// Create a raw pointer in place without an intermediate reference. Use
// this to return a slice from the packed buffer
let addr = ptr::addr_of_mut!(self.tbuf[x]) as *mut _;
core::slice::from_raw_parts_mut(addr, len)
}
}
/// Receive Descriptor representation
///
/// * rdes0: receive buffer address
/// * rdes1:
/// * rdes2:
/// * rdes3: OWN and Status
///
/// Note that Copy and Clone are derived to support initialising an
/// array of RDes, but you may not move a RDes after its address has
/// been given to the ETH_DMA engine.
#[derive(Copy, Clone)]
#[repr(C, packed)]
struct RDes {
rdes0: u32,
rdes1: u32,
rdes2: u32,
rdes3: u32,
}
impl RDes {
/// Initialises RDes
pub fn init(&mut self) {
self.rdes0 = 0;
self.rdes1 = 0;
self.rdes2 = 0;
self.rdes3 = 0; // Owned by us
}
/// Return true if this RDes is acceptable to us
pub fn valid(&self) -> bool {
|
// Contains first buffer of packet AND contains last buffer of
// packet AND no errors AND not a context descriptor
self.rdes3
& (EMAC_DES3_FD | EMAC_DES3_LD | EMAC_DES3_ES | EMAC_DES3_CTXT)
== (EMAC_DES3_FD | EMAC_DES3_LD)
}
/// Return true if this RDes is not currently owned by the DMA
pub fn available(&self) -> bool {
self.rdes3 & EMAC_DES3_OWN == 0 // Owned by us
}
}
/// Store a ring of RDes and associated buffers
#[repr(C, packed)]
struct RDesRing<const RD: usize> {
rd: [RDes; RD],
rbuf: [[u32; ETH_BUF_SIZE / 4]; RD],
rdidx: usize,
}
impl<const RD: usize> RDesRing<RD> {
const fn new() -> Self {
Self {
rd: [RDes {
rdes0: 0,
rdes1: 0,
rdes2: 0,
rdes3: 0,
}; RD],
rbuf: [[0; ETH_BUF_SIZE / 4]; RD],
rdidx: 0,
}
}
/// Initialise this RDesRing. Assume RDesRing is corrupt
///
/// The current memory address of the buffers inside this RDesRing
/// will be stored in the descriptors, so ensure the RDesRing is
/// not moved after initialisation.
pub fn init(&mut self) {
for x in 0..RD {
self.rd[x].init();
}
self.rdidx = 0;
// Initialise pointers in the DMA engine
unsafe {
let dma = &*stm32::ETHERNET_DMA::ptr();
dma.dmacrx_dlar
.write(|w| w.bits(&self.rd[0] as *const _ as u32));
dma.dmacrx_rlr.write(|w| w.rdrl().bits(RD as u16 - 1));
}
// Release descriptors to the DMA engine
while self.available() {
self.release()
}
}
/// Return true if a RDes is available for use
pub fn available(&self) -> bool {
self.rd[self.rdidx].available()
}
/// Return true if current RDes is valid
pub fn valid(&self) -> bool {
self.rd[self.rdidx].valid()
}
/// Release the next RDes to the DMA engine
pub fn release(&mut self) {
let x = self.rdidx;
assert!(self.rd[x].rdes3 & EMAC_DES3_OWN == 0); // Owned by us
let address = ptr::addr_of!(self.rbuf[x]) as u32;
// Read format
self.rd[x].rdes0 = address; // Buffer 1
self.rd[x].rdes1 = 0; // Reserved
self.rd[x].rdes2 = 0; // Marked as invalid
self.rd[x].rdes3 = 0;
self.rd[x].rdes3 |= EMAC_DES3_OWN; // Give the DMA engine ownership
self.rd[x].rdes3 |= EMAC_RDES3_BUF1V; // BUF1V: 1st buffer address is valid
self.rd[x].rdes3 |= EMAC_RDES3_IOC; // IOC: Interrupt on complete
// Ensure changes to the descriptor are committed before
// DMA engine sees tail pointer store
cortex_m::asm::dsb();
// Move the tail pointer (TPR) to this descriptor
unsafe {
let dma = &*stm32::ETHERNET_DMA::ptr();
dma.dmacrx_dtpr
.write(|w| w.bits(&(self.rd[x]) as *const _ as u32));
}
// Update active descriptor
self.rdidx = (x + 1) % RD;
}
/// Access the buffer pointed to by the next RDes
///
/// # Safety
///
/// Ensure that release() is called between subsequent calls to this
/// function.
#[allow(clippy::mut_from_ref)]
pub unsafe fn buf_as_slice_mut(&self) -> &mut [u8] {
let x = self.rdidx;
// Write-back format
let addr = ptr::addr_of!(self.rbuf[x]) as *mut u8;
let len = (self.rd[x].rdes3 & EMAC_RDES3_PL) as usize;
let len = core::cmp::min(len, ETH_BUF_SIZE);
core::slice::from_raw_parts_mut(addr, len)
}
}
pub struct DesRing<const TD: usize, const RD: usize> {
tx: TDesRing<TD>,
rx: RDesRing<RD>,
}
impl<const TD: usize, const RD: usize> DesRing<TD, RD> {
pub const fn new() -> Self {
DesRing {
tx: TDesRing::new(),
rx: RDesRing::new(),
}
}
}
impl<const TD: usize, const RD: usize> Default for DesRing<TD, RD> {
fn default() -> Self {
Self::new()
}
}
///
/// Ethernet DMA
///
pub struct EthernetDMA<const TD: usize, const RD: usize> {
ring: &'static mut DesRing<TD, RD>,
eth_dma: stm32::ETHERNET_DMA,
}
///
/// Ethernet MAC
///
pub struct EthernetMAC {
eth_mac: stm32::ETHERNET_MAC,
eth_phy_addr: u8,
clock_range: u8,
}
/// Create and initialise the ethernet driver.
///
/// You must move in ETH_MAC, ETH_MTL, ETH_DMA.
///
/// Sets up the descriptor structures, sets up the peripheral
/// clocks and GPIO configuration, and configures the ETH MAC and
/// DMA peripherals. Automatically sets slew rate to VeryHigh.
/// If you wish to use another configuration, please see
/// [new_unchecked](new_unchecked).
///
/// This method does not initialise the external PHY. However it does return an
/// [EthernetMAC](EthernetMAC) which implements the
/// [StationManagement](super::StationManagement) trait. This can be used to
/// communicate with the external PHY.
///
/// # Safety
///
/// `EthernetDMA` shall not be moved as it is initialised here
#[allow(clippy::too_many_arguments)]
pub fn new<const TD: usize, const RD: usize>(
eth_mac: stm32::ETHERNET_MAC,
eth_mtl: stm32::ETHERNET_MTL,
eth_dma: stm32::ETHERNET_DMA,
mut pins: impl PinsRMII,
ring: &'static mut DesRing<T
|
// Write-back descriptor is valid if:
//
|
random_line_split
|
models.py
|
- means a fixed day of fourth class falling on 19 Nov
"""
lang = None
def __init__(self, observance_id: str, date_: date, lang: str):
""" Build an Observance out of identifier and calendar date
:param observance_id: observance's identifier in format
<flexibility>:<identifier>:<rank>
:type observance_id: string
:param date_: specific date on which the observance is supposed
to be placed. For some Sancti days the rank (class)
depends on which calendar day they fall.
:type date_: `date` object
"""
self.lang = lang
translation = importlib.import_module(f'constants.{lang}.translation')
flexibility, name, rank = observance_id.split(':')
self.flexibility: str = flexibility
self.name: str = name
self.rank: int = self._calc_rank(observance_id, date_, int(rank))
self.id: str = ':'.join((self.flexibility, self.name, str(self.rank)))
self.title: str = translation.titles.get(observance_id)
if flexibility == TYPE_TEMPORA and observance_id not in (C_10A, C_10B, C_10C, C_10PASC, C_10T):
self.weekday = WEEKDAY_MAPPING[re.sub('^.*-(\d+).*$', '\\1', name)]
else:
self.weekday = date_.weekday()
self.priority = self._calc_priority()
def get_proper(self) -> Tuple['Proper', 'Proper']:
return ProperParser.parse(self.id, self.lang)
def serialize(self) -> dict:
return {'id': self.id, 'rank': self.rank, 'title': self.title}
def _calc_rank(self, observance_id: str, date_: date, original_rank: int) -> int:
"""
Some observances' ranks depend on the calendar day on which they fall, for example:
Advent feria days between 17 and 23 December are 2nd class,
while other Advent feria days are 3rd class.
"""
for case in TEMPORA_RANK_MAP:
if date_.month == case['month'] and date_.day == case['day'] and re.match(case['pattern'], observance_id):
return case['rank']
return original_rank
def _calc_priority(self) -> Union[None, int]:
"""
Calculate priority according to the Precedence Table.
"""
for priority, pattern in enumerate(TABLE_OF_PRECEDENCE):
if re.match(pattern, self.id):
return priority
def __repr__(self):
return "<{}>".format(self.id)
def __eq__(self, other):
return not self.rank < other.rank and not other.rank < self.rank
def __ne__(self, other):
return self.rank < other.rank or other.rank < self.rank
def __ge__(self, other):
return not self.rank < other.rank
def __gt__(self, other):
return other.rank > self.rank
def __lt__(self, other):
return other.rank < self.rank
def __le__(self, other):
return not other.rank > self.rank
class Day:
""" Class used to keep `Observance` objects for particular days of Missal.
It contains three lists: `tempora`, `celebration` and `commemoration`.
On Missal's creation the lists are filled in so that `tempora` always contains the `Observance` representing
the given variable day, `celebration` contains the `Observance`(s) to be celebrated on this day, and
`commemoration` contains zero or more `Observance`s that should be commemorated with the main celebration.
"""
calendar: 'Calendar' = None
tempora: List['Observance'] = None
celebration: List['Observance'] = None
commemoration: List['Observance'] = None
def __init__(self, date_: date, calendar: 'Calendar') -> None:
self.date = date_
self.calendar = calendar
self.tempora = []
self.celebration = []
self.commemoration = []
@property
def all(self) -> List['Observance']:
return self.tempora + self.celebration + self.commemoration
def get_tempora_name(self) -> Union[None, str]:
if self.tempora:
return self.tempora[0].title
def get_celebration_name(self) -> Union[None, str]:
if self.celebration:
return self.celebration[0].title
def get_proper(self) -> List[Tuple['Proper', 'Proper']]:
"""
Get the proper that is used in today's Mass. If the given day does not have a dedicated proper,
use the one from the most recent Sunday.
"""
if self.celebration:
try:
return [i.get_proper() for i in self.celebration]
except ProperNotFound as e:
if self.celebration[0].flexibility == 'sancti':
log.error(e)
# No proper for this day, trying to get one from the latest Sunday
date_: date = copy(self.date)
while date_.weekday() != 6: # Sunday
if date_ == datetime.date(self.date.year, 1, 1):
break
date_ = date_ - datetime.timedelta(days=1)
day: Day = self.calendar.get_day(date_)
# Handling exceptions
if day.celebration[0].id == TEMPORA_EPI1_0:
# "Feast of the Holy Family" replaces "First Sunday after Epiphany"; use the latter in
# the following days without their own proper
return [ProperParser.parse(TEMPORA_EPI1_0A, self.calendar.lang)]
if day.celebration[0].id == TEMPORA_PENT01_0:
# "Trinity Sunday" replaces "1st Sunday after Pentecost"; use the latter in
# the following days without their own proper
return [ProperParser.parse(PENT01_0A, self.calendar.lang)]
if day.tempora:
return [day.tempora[0].get_proper()]
return [day.celebration[0].get_proper()]
def serialize(self) -> dict:
serialized = {}
for container in ('tempora', 'celebration', 'commemoration'):
serialized[container] = [i.serialize() for i in getattr(self, container)]
return serialized
def __str__(self):
return str(self.tempora) + str(self.celebration) + str(self.commemoration)
class Calendar:
"""
Class representing a Calendar.
Internally it keeps the data in an ordered dict of `Day`s where each key is a `date` object and each value
is a `Day` containing `Observance` objects organized inside the Day's members. Example:
{
...
datetime.date(2008, 5, 3): Day(tempora:[Observance<tempora:Pasc5-6:4>]
celebration:[Observance<sancti:05-03-1:1>],
commemoration:[])
datetime.date(2008, 5, 4): Day(tempora:[Observance<tempora:Pasc6-0:2>],
celebration:[Observance<sancti:05-04-1:3>]
commemoration:[])
datetime.date(2008, 5, 5): Day(tempora:[Observance<tempora:Pasc6-1:4>],
celebration:[Observance<sancti:05-05-1:3>]
commemoration:[])
datetime.date(2008, 5, 6): Day(tempora:[Observance<tempora:Pasc6-2:4>],
celebration:[Observance<tempora:Pasc6-2:4>]
commemoration:[])
...
}
"""
lang = None
_container = None
def __init__(self, year: int, lang: str) -> None:
""" Build a calendar and fill it in with empty `Day` objects
"""
self.lang = lang
self._container = OrderedDict()
self._build_empty_calendar(year)
def _build_empty_calendar(self, year: int) -> None:
date_ = date(year, 1, 1)
while date_.year == year:
self._container[date_] = Day(date_, self)
date_ += timedelta(days=1)
def get_day(self, date_: datetime.date) -> Day:
return self._container.get(date_)
def find_day(self, observance_id: str) -> Union[None, Tuple[date, Day]]:
""" Return a day representation by observance ID, if any
:param observance_id: observance's identifier, for example TEMPORA_EPI6_0
:type observance_id: string
:return: date and day representation
:rtype: tuple(date, Day)
"""
for date_, day in self._container.items():
|
if observance_id in [ii.id for ii in day.all]:
return date_, day
|
conditional_block
|
|
models.py
|
DAY_MAPPING)
from propers.parser import ProperParser
log = logging.getLogger(__name__)
class Observance:
"""
A class representing a single observance, such as "The first Friday after Pentecost" or "Assumption of Mary".
It parses the observance's ID and extracts the weekday, the day's class/rank and a human readable identifier.
Example:
'tempora:Epi2-4:4'
rank: 4
weekday: 3
name: Epi2-4
Each identifier consists of three colon-separated elements:
flexibility - determines if it's a fixed (sancti) or movable (tempora) observance
identifier - a unique human readable observance identifier. In case of movable
days it's a day's name, in case of 'sancti' days it contains a date
in format %m-%d
rank - observance's class, a number between 1 and 4
Example:
'tempora:Epi2-3:4' - means movable day of fourth class
which is third feria day (Wednesday) in second week after Epiphany
'sancti:11-19:4' - means a fixed day of fourth class falling on 19 Nov
"""
lang = None
def __init__(self, observance_id: str, date_: date, lang: str):
""" Build an Observance out of identifier and calendar date
:param observance_id: observance's identifier in format
<flexibility>:<identifier>:<rank>
:type observance_id: string
:param date_: specific date on which the observance is supposed
to be placed. For some Sancti days the rank (class)
depends on which calendar day they fall.
:type date_: `date` object
"""
self.lang = lang
translation = importlib.import_module(f'constants.{lang}.translation')
flexibility, name, rank = observance_id.split(':')
self.flexibility: str = flexibility
self.name: str = name
self.rank: int = self._calc_rank(observance_id, date_, int(rank))
self.id: str = ':'.join((self.flexibility, self.name, str(self.rank)))
self.title: str = translation.titles.get(observance_id)
if flexibility == TYPE_TEMPORA and observance_id not in (C_10A, C_10B, C_10C, C_10PASC, C_10T):
self.weekday = WEEKDAY_MAPPING[re.sub('^.*-(\d+).*$', '\\1', name)]
else:
self.weekday = date_.weekday()
self.priority = self._calc_priority()
def get_proper(self) -> Tuple['Proper', 'Proper']:
return ProperParser.parse(self.id, self.lang)
def serialize(self) -> dict:
return {'id': self.id, 'rank': self.rank, 'title': self.title}
def _calc_rank(self, observance_id: str, date_: date, original_rank: int) -> int:
"""
Some observances' ranks depend on the calendar day on which they fall, for example:
Advent feria days between 17 and 23 December are 2nd class,
while other Advent feria days are 3rd class.
"""
for case in TEMPORA_RANK_MAP:
if date_.month == case['month'] and date_.day == case['day'] and re.match(case['pattern'], observance_id):
return case['rank']
return original_rank
def _calc_priority(self) -> Union[None, int]:
"""
Calculate priority according to the Precedence Table.
"""
for priority, pattern in enumerate(TABLE_OF_PRECEDENCE):
if re.match(pattern, self.id):
return priority
def __repr__(self):
return "<{}>".format(self.id)
def __eq__(self, other):
return not self.rank < other.rank and not other.rank < self.rank
def __ne__(self, other):
return self.rank < other.rank or other.rank < self.rank
def __ge__(self, other):
return not self.rank < other.rank
def __gt__(self, other):
return other.rank > self.rank
def __lt__(self, other):
return other.rank < self.rank
def __le__(self, other):
return not other.rank > self.rank
class Day:
""" Class used to keep `Observance` objects for particular days of Missal.
It contains three lists: `tempora`, `celebration` and `commemoration`.
On Missal's creation the lists are filled in so that `tempora` always contains the `Observance` representing
the given variable day, `celebration` contains the `Observance`(s) to be celebrated on this day, and
`commemoration` contains zero or more `Observance`s that should be commemorated with the main celebration.
"""
calendar: 'Calendar' = None
tempora: List['Observance'] = None
celebration: List['Observance'] = None
commemoration: List['Observance'] = None
def __init__(self, date_: date, calendar: 'Calendar') -> None:
self.date = date_
self.calendar = calendar
self.tempora = []
self.celebration = []
self.commemoration = []
@property
def all(self) -> List['Observance']:
return self.tempora + self.celebration + self.commemoration
def get_tempora_name(self) -> Union[None, str]:
if self.tempora:
return self.tempora[0].title
def
|
(self) -> Union[None, str]:
if self.celebration:
return self.celebration[0].title
def get_proper(self) -> List[Tuple['Proper', 'Proper']]:
"""
Get the proper that is used in today's Mass. If the given day does not have a dedicated proper,
use the one from the most recent Sunday.
"""
if self.celebration:
try:
return [i.get_proper() for i in self.celebration]
except ProperNotFound as e:
if self.celebration[0].flexibility == 'sancti':
log.error(e)
# No proper for this day, trying to get one from the latest Sunday
date_: date = copy(self.date)
while date_.weekday() != 6: # Sunday
if date_ == datetime.date(self.date.year, 1, 1):
break
date_ = date_ - datetime.timedelta(days=1)
day: Day = self.calendar.get_day(date_)
# Handling exceptions
if day.celebration[0].id == TEMPORA_EPI1_0:
# "Feast of the Holy Family" replaces "First Sunday after Epiphany"; use the latter in
# the following days without their own proper
return [ProperParser.parse(TEMPORA_EPI1_0A, self.calendar.lang)]
if day.celebration[0].id == TEMPORA_PENT01_0:
# "Trinity Sunday" replaces "1st Sunday after Pentecost"; use the latter in
# the following days without their own proper
return [ProperParser.parse(PENT01_0A, self.calendar.lang)]
if day.tempora:
return [day.tempora[0].get_proper()]
return [day.celebration[0].get_proper()]
def serialize(self) -> dict:
serialized = {}
for container in ('tempora', 'celebration', 'commemoration'):
serialized[container] = [i.serialize() for i in getattr(self, container)]
return serialized
def __str__(self):
return str(self.tempora) + str(self.celebration) + str(self.commemoration)
class Calendar:
"""
Class representing a Calendar.
Internally it keeps the data in an ordered dict of `Day`s where each key is a `date` object and each value
is a `Day` containing `Observance` objects organized inside the Day's members. Example:
{
...
datetime.date(2008, 5, 3): Day(tempora:[Observance<tempora:Pasc5-6:4>]
celebration:[Observance<sancti:05-03-1:1>],
commemoration:[])
datetime.date(2008, 5, 4): Day(tempora:[Observance<tempora:Pasc6-0:2>],
celebration:[Observance<sancti:05-04-1:3>]
commemoration:[])
datetime.date(2008, 5, 5): Day(tempora:[Observance<tempora:Pasc6-1:4>],
celebration:[Observance<sancti:05-05-1:3>]
commemoration:[])
datetime.date(2008, 5, 6): Day(tempora:[Observance<tempora:Pasc6-2:4>],
celebration:[Observance<tempora:Pasc6-2:4>]
commemoration:[])
...
}
"""
lang =
|
get_celebration_name
|
identifier_name
|
models.py
|
DAY_MAPPING)
from propers.parser import ProperParser
log = logging.getLogger(__name__)
class Observance:
"""
A class representing a single observance, such as "The first Friday after Pentecost" or "Assumption of Mary".
It parses the observance's ID and extracts the weekday, the day's class/rank and a human readable identifier.
Example:
'tempora:Epi2-4:4'
rank: 4
weekday: 3
name: Epi2-4
Each identifier consists of three colon-separated elements:
flexibility - determines if it's a fixed (sancti) or movable (tempora) observance
identifier - a unique human readable observance identifier. In case of movable
days it's a day's name, in case of 'sancti' days it contains a date
in format %m-%d
rank - observance's class, a number between 1 and 4
Example:
'tempora:Epi2-3:4' - means movable day of fourth class
which is third feria day (Wednesday) in second week after Epiphany
'sancti:11-19:4' - means a fixed day of fourth class falling on 19 Nov
"""
lang = None
def __init__(self, observance_id: str, date_: date, lang: str):
""" Build an Observance out of identifier and calendar date
:param observance_id: observance's identifier in format
<flexibility>:<identifier>:<rank>
:type observance_id: string
:param date_: specific date on which the observance is supposed
to be placed. For some Sancti days the rank (class)
depends on which calendar day they fall.
:type date_: `date` object
"""
self.lang = lang
translation = importlib.import_module(f'constants.{lang}.translation')
flexibility, name, rank = observance_id.split(':')
self.flexibility: str = flexibility
self.name: str = name
self.rank: int = self._calc_rank(observance_id, date_, int(rank))
self.id: str = ':'.join((self.flexibility, self.name, str(self.rank)))
self.title: str = translation.titles.get(observance_id)
if flexibility == TYPE_TEMPORA and observance_id not in (C_10A, C_10B, C_10C, C_10PASC, C_10T):
self.weekday = WEEKDAY_MAPPING[re.sub('^.*-(\d+).*$', '\\1', name)]
else:
self.weekday = date_.weekday()
self.priority = self._calc_priority()
def get_proper(self) -> Tuple['Proper', 'Proper']:
return ProperParser.parse(self.id, self.lang)
def serialize(self) -> dict:
return {'id': self.id, 'rank': self.rank, 'title': self.title}
def _calc_rank(self, observance_id: str, date_: date, original_rank: int) -> int:
"""
Some observances' ranks depend on the calendar day on which they fall, for example:
Advent feria days between 17 and 23 December are 2nd class,
while other Advent feria days are 3rd class.
"""
for case in TEMPORA_RANK_MAP:
if date_.month == case['month'] and date_.day == case['day'] and re.match(case['pattern'], observance_id):
return case['rank']
return original_rank
def _calc_priority(self) -> Union[None, int]:
"""
Calculate priority according to the Precedence Table.
"""
for priority, pattern in enumerate(TABLE_OF_PRECEDENCE):
if re.match(pattern, self.id):
return priority
def __repr__(self):
return "<{}>".format(self.id)
def __eq__(self, other):
return not self.rank < other.rank and not other.rank < self.rank
def __ne__(self, other):
return self.rank < other.rank or other.rank < self.rank
def __ge__(self, other):
return not self.rank < other.rank
def __gt__(self, other):
return other.rank > self.rank
def __lt__(self, other):
return other.rank < self.rank
def __le__(self, other):
return not other.rank > self.rank
class Day:
""" Class used to keep `Observance` objects for particular days of Missal.
It contains three lists: `tempora`, `celebration` and `commemoration`.
On Missal's creation the lists are filled in so that `tempora` always contains the `Observance` representing
the given variable day, `celebration` contains the `Observance`(s) to be celebrated on this day, and
`commemoration` contains zero or more `Observance`s that should be commemorated with the main celebration.
"""
calendar: 'Calendar' = None
tempora: List['Observance'] = None
celebration: List['Observance'] = None
commemoration: List['Observance'] = None
def __init__(self, date_: date, calendar: 'Calendar') -> None:
self.date = date_
self.calendar = calendar
self.tempora = []
self.celebration = []
self.commemoration = []
@property
def all(self) -> List['Observance']:
return self.tempora + self.celebration + self.commemoration
def get_tempora_name(self) -> Union[None, str]:
if self.tempora:
return self.tempora[0].title
def get_celebration_name(self) -> Union[None, str]:
if self.celebration:
return self.celebration[0].title
def get_proper(self) -> List[Tuple['Proper', 'Proper']]:
|
# "Feast of the Holy Family" replaces "First Sunday after Epiphany"; use the latter in
# the following days without their own proper
return [ProperParser.parse(TEMPORA_EPI1_0A, self.calendar.lang)]
if day.celebration[0].id == TEMPORA_PENT01_0:
# "Trinity Sunday" replaces "1st Sunday after Pentecost"; use the latter in
# the following days without their own proper
return [ProperParser.parse(PENT01_0A, self.calendar.lang)]
if day.tempora:
return [day.tempora[0].get_proper()]
return [day.celebration[0].get_proper()]
def serialize(self) -> dict:
serialized = {}
for container in ('tempora', 'celebration', 'commemoration'):
serialized[container] = [i.serialize() for i in getattr(self, container)]
return serialized
def __str__(self):
return str(self.tempora) + str(self.celebration) + str(self.commemoration)
class Calendar:
"""
Class representing a Calendar.
Internally it keeps the data in an ordered dict of `Day`s where each key is a `date` object and each value
is a `Day` containing `Observance` objects organized inside the Day's members. Example:
{
...
datetime.date(2008, 5, 3): Day(tempora:[Observance<tempora:Pasc5-6:4>]
celebration:[Observance<sancti:05-03-1:1>],
commemoration:[])
datetime.date(2008, 5, 4): Day(tempora:[Observance<tempora:Pasc6-0:2>],
celebration:[Observance<sancti:05-04-1:3>]
commemoration:[])
datetime.date(2008, 5, 5): Day(tempora:[Observance<tempora:Pasc6-1:4>],
celebration:[Observance<sancti:05-05-1:3>]
commemoration:[])
datetime.date(2008, 5, 6): Day(tempora:[Observance<tempora:Pasc6-2:4>],
celebration:[Observance<tempora:Pasc6-2:4>]
commemoration:[])
...
}
"""
lang =
|
"""
Get the proper that is used in today's Mass. If the given day does not have a dedicated proper,
use the one from the most recent Sunday.
"""
if self.celebration:
try:
return [i.get_proper() for i in self.celebration]
except ProperNotFound as e:
if self.celebration[0].flexibility == 'sancti':
log.error(e)
# No proper for this day, trying to get one from the latest Sunday
date_: date = copy(self.date)
while date_.weekday() != 6: # Sunday
if date_ == datetime.date(self.date.year, 1, 1):
break
date_ = date_ - datetime.timedelta(days=1)
day: Day = self.calendar.get_day(date_)
# Handling exceptions
if day.celebration[0].id == TEMPORA_EPI1_0:
|
identifier_body
|
models.py
|
name: Epi2-4
Each identifier consists of three colon-separated elements:
flexibility - determines if it's a fixed (sancti) or movable (tempora) observance
identifier - a unique human readable observance identifier. In case of movable
days it's a day's name, in case of 'sancti' days it contains a date
in format %m-%d
rank - observance's class, a number between 1 and 4
Example:
'tempora:Epi2-3:4' - means movable day of fourth class
which is third feria day (Wednesday) in second week after Epiphany
'sancti:11-19:4' - means a fixed day of fourth class falling on 19 Nov
"""
lang = None
def __init__(self, observance_id: str, date_: date, lang: str):
""" Build an Observance out of identifier and calendar date
:param observance_id: observance's identifier in format
<flexibility>:<identifier>:<rank>
:type observance_id: string
:param date_: specific date on which the observance is supposed
to be placed. For some Sancti days the rank (class)
depends on which calendar day they fall.
:type date_: `date` object
"""
self.lang = lang
translation = importlib.import_module(f'constants.{lang}.translation')
flexibility, name, rank = observance_id.split(':')
self.flexibility: str = flexibility
self.name: str = name
self.rank: int = self._calc_rank(observance_id, date_, int(rank))
self.id: str = ':'.join((self.flexibility, self.name, str(self.rank)))
self.title: str = translation.titles.get(observance_id)
if flexibility == TYPE_TEMPORA and observance_id not in (C_10A, C_10B, C_10C, C_10PASC, C_10T):
self.weekday = WEEKDAY_MAPPING[re.sub('^.*-(\d+).*$', '\\1', name)]
else:
self.weekday = date_.weekday()
self.priority = self._calc_priority()
def get_proper(self) -> Tuple['Proper', 'Proper']:
return ProperParser.parse(self.id, self.lang)
def serialize(self) -> dict:
return {'id': self.id, 'rank': self.rank, 'title': self.title}
def _calc_rank(self, observance_id: str, date_: date, original_rank: int) -> int:
"""
Some observances' ranks depend on the calendar day on which they fall, for example:
Advent feria days between 17 and 23 December are 2nd class,
while other Advent feria days are 3rd class.
"""
for case in TEMPORA_RANK_MAP:
if date_.month == case['month'] and date_.day == case['day'] and re.match(case['pattern'], observance_id):
return case['rank']
return original_rank
def _calc_priority(self) -> Union[None, int]:
"""
Calculate priority according to the Precedence Table.
"""
for priority, pattern in enumerate(TABLE_OF_PRECEDENCE):
if re.match(pattern, self.id):
return priority
def __repr__(self):
return "<{}>".format(self.id)
def __eq__(self, other):
return not self.rank < other.rank and not other.rank < self.rank
def __ne__(self, other):
return self.rank < other.rank or other.rank < self.rank
def __ge__(self, other):
return not self.rank < other.rank
def __gt__(self, other):
return other.rank > self.rank
def __lt__(self, other):
return other.rank < self.rank
def __le__(self, other):
return not other.rank > self.rank
class Day:
""" Class used to keep `Observance` objects for particular days of Missal.
It contains three lists: `tempora`, `celebration` and `commemoration`.
On Missal's creation the lists are filled in so that `tempora` always contains the `Observance` representing
the given variable day, `celebration` contains the `Observance`(s) to be celebrated on this day, and
`commemoration` contains zero or more `Observance`s that should be commemorated with the main celebration.
"""
calendar: 'Calendar' = None
tempora: List['Observance'] = None
celebration: List['Observance'] = None
commemoration: List['Observance'] = None
def __init__(self, date_: date, calendar: 'Calendar') -> None:
self.date = date_
self.calendar = calendar
self.tempora = []
self.celebration = []
self.commemoration = []
@property
def all(self) -> List['Observance']:
return self.tempora + self.celebration + self.commemoration
def get_tempora_name(self) -> Union[None, str]:
if self.tempora:
return self.tempora[0].title
def get_celebration_name(self) -> Union[None, str]:
if self.celebration:
return self.celebration[0].title
def get_proper(self) -> List[Tuple['Proper', 'Proper']]:
"""
Get the proper that is used in today's Mass. If the given day does not have a dedicated proper,
use the one from the most recent Sunday.
"""
if self.celebration:
try:
return [i.get_proper() for i in self.celebration]
except ProperNotFound as e:
if self.celebration[0].flexibility == 'sancti':
log.error(e)
# No proper for this day, trying to get one from the latest Sunday
date_: date = copy(self.date)
while date_.weekday() != 6: # Sunday
if date_ == datetime.date(self.date.year, 1, 1):
break
date_ = date_ - datetime.timedelta(days=1)
day: Day = self.calendar.get_day(date_)
# Handling exceptions
if day.celebration[0].id == TEMPORA_EPI1_0:
# "Feast of the Holy Family" replaces "First Sunday after Epiphany"; use the latter in
# the following days without their own proper
return [ProperParser.parse(TEMPORA_EPI1_0A, self.calendar.lang)]
if day.celebration[0].id == TEMPORA_PENT01_0:
# "Trinity Sunday" replaces "1st Sunday after Pentecost"; use the latter in
# the following days without their own proper
return [ProperParser.parse(PENT01_0A, self.calendar.lang)]
if day.tempora:
return [day.tempora[0].get_proper()]
return [day.celebration[0].get_proper()]
def serialize(self) -> dict:
serialized = {}
for container in ('tempora', 'celebration', 'commemoration'):
serialized[container] = [i.serialize() for i in getattr(self, container)]
return serialized
def __str__(self):
return str(self.tempora) + str(self.celebration) + str(self.commemoration)
class Calendar:
"""
Class representing a Calendar.
Internally it keeps the data in an ordered dict of `Day`s where each key is a `date` object and each value
is a `Day` containing `Observance` objects organized inside the Day's members. Example:
{
...
datetime.date(2008, 5, 3): Day(tempora:[Observance<tempora:Pasc5-6:4>]
celebration:[Observance<sancti:05-03-1:1>],
commemoration:[])
datetime.date(2008, 5, 4): Day(tempora:[Observance<tempora:Pasc6-0:2>],
celebration:[Observance<sancti:05-04-1:3>]
commemoration:[])
datetime.date(2008, 5, 5): Day(tempora:[Observance<tempora:Pasc6-1:4>],
celebration:[Observance<sancti:05-05-1:3>]
commemoration:[])
datetime.date(2008, 5, 6): Day(tempora:[Observance<tempora:Pasc6-2:4>],
celebration:[Observance<tempora:Pasc6-2:4>]
commemoration:[])
...
}
"""
lang = None
_container = None
def __init__(self, year: int, lang: str) -> None:
""" Build a calendar and fill it in with empty `Day` objects
"""
self.lang = lang
self._container = OrderedDict()
self._build_empty_calendar(year)
def _build_empty_calendar(self, year: int) -> None:
|
date_ = date(year, 1, 1)
while date_.year == year:
|
random_line_split
|
|
sample.go
|
unkItem, 64)
allStats := &SampleStats{}
statsLock := sync.Mutex{}
addStats := func(stats SampleStats) {
statsLock.Lock()
allStats.add(stats)
statsLock.Unlock()
}
t := time.Now()
excludedBatchIDs, err := db.batchesBelowValue(minBatchBalance)
if err != nil {
db.logger.Error(err, "get batches below value")
}
allStats.BatchesBelowValueDuration = time.Since(t)
// Phase 1: Iterate chunk addresses
g.Go(func() error {
start := time.Now()
stats := SampleStats{}
defer func() {
stats.IterationDuration = time.Since(start)
close(chunkC)
addStats(stats)
}()
err := db.reserve.IterateChunksItems(db.repo, storageRadius, func(chi reserve.ChunkItem) (bool, error) {
select {
case chunkC <- chi:
stats.TotalIterated++
return false, nil
case <-ctx.Done():
return false, ctx.Err()
}
})
return err
})
// Phase 2: Get the chunk data and calculate transformed hash
sampleItemChan := make(chan SampleItem, 64)
const workers = 6
for i := 0; i < workers; i++ {
g.Go(func() error {
wstat := SampleStats{}
defer func() {
addStats(wstat)
}()
hmacr := hmac.New(swarm.NewHasher, anchor)
for chItem := range chunkC {
// exclude chunks whose batches' balances are below the minimum
if _, found := excludedBatchIDs[string(chItem.BatchID)]; found {
wstat.BelowBalanceIgnored++
continue
}
// Skip chunks if they are not SOC or CAC
if chItem.Type != swarm.ChunkTypeSingleOwner &&
chItem.Type != swarm.ChunkTypeContentAddressed {
wstat.RogueChunk++
continue
}
chunkLoadStart := time.Now()
chunk, err := db.ChunkStore().Get(ctx, chItem.ChunkAddress)
if err != nil {
wstat.ChunkLoadFailed++
db.logger.Debug("failed loading chunk", "chunk_address", chItem.ChunkAddress, "error", err)
continue
}
wstat.ChunkLoadDuration += time.Since(chunkLoadStart)
hmacrStart := time.Now()
hmacr.Reset()
_, err = hmacr.Write(chunk.Data())
if err != nil {
return err
}
taddr := swarm.NewAddress(hmacr.Sum(nil))
wstat.HmacrDuration += time.Since(hmacrStart)
select {
case sampleItemChan <- SampleItem{
TransformedAddress: taddr,
ChunkAddress: chunk.Address(),
ChunkData: chunk.Data(),
Stamp: postage.NewStamp(chItem.BatchID, nil, nil, nil),
}:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
}
go func() {
_ = g.Wait()
close(sampleItemChan)
}()
sampleItems := make([]SampleItem, 0, SampleSize)
// insert places the new item at its correct (sorted) position. If the sample
// size grows beyond what we need, the last item is dropped.
insert := func(item SampleItem) {
added := false
for i, sItem := range sampleItems {
if le(item.TransformedAddress, sItem.TransformedAddress) {
sampleItems = append(sampleItems[:i+1], sampleItems[i:]...)
sampleItems[i] = item
added = true
break
}
}
if len(sampleItems) > SampleSize {
sampleItems = sampleItems[:SampleSize]
}
if len(sampleItems) < SampleSize && !added {
sampleItems = append(sampleItems, item)
}
}
// Phase 3: Assemble the sample. Here we need to assemble only the first SampleSize
// number of items from the results of the 2nd phase.
// In this step stamps are loaded and validated only if the chunk will be added to the sample.
stats := SampleStats{}
for item := range sampleItemChan {
currentMaxAddr := swarm.EmptyAddress
if len(sampleItems) > 0 {
currentMaxAddr = sampleItems[len(sampleItems)-1].TransformedAddress
}
if le(item.TransformedAddress, currentMaxAddr) || len(sampleItems) < SampleSize {
start := time.Now()
stamp, err := chunkstamp.LoadWithBatchID(db.repo.IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID())
if err != nil {
stats.StampLoadFailed++
db.logger.Debug("failed loading stamp", "chunk_address", item.ChunkAddress, "error", err)
continue
}
ch := swarm.NewChunk(item.ChunkAddress, item.ChunkData).WithStamp(stamp)
// check if the timestamp on the postage stamp is not later than the consensus time.
if binary.BigEndian.Uint64(ch.Stamp().Timestamp()) > consensusTime {
stats.NewIgnored++
continue
}
if _, err := db.validStamp(ch); err != nil {
stats.InvalidStamp++
db.logger.Debug("invalid stamp for chunk", "chunk_address", ch.Address(), "error", err)
continue
}
stats.ValidStampDuration += time.Since(start)
item.Stamp = stamp
insert(item)
stats.SampleInserts++
}
}
addStats(stats)
allStats.TotalDuration = time.Since(t)
if err := g.Wait(); err != nil {
db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{}, fmt.Errorf("sampler: failed creating sample: %w", err)
}
db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{Stats: *allStats, Items: sampleItems}, nil
}
// le reports whether a is lexicographically (byte-wise) less than b
func le(a, b swarm.Address) bool {
return bytes.Compare(a.Bytes(), b.Bytes()) == -1
}
func (db *DB) batchesBelowValue(until *big.Int) (map[string]struct{}, error) {
res := make(map[string]struct{})
if until == nil {
return res, nil
}
err := db.batchstore.Iterate(func(b *postage.Batch) (bool, error) {
if b.Value.Cmp(until) < 0 {
res[string(b.ID)] = struct{}{}
}
return false, nil
})
return res, err
}
func transformedAddress(hasher *bmt.Hasher, chunk swarm.Chunk, chType swarm.ChunkType) (swarm.Address, error) {
switch chType {
case swarm.ChunkTypeContentAddressed:
return transformedAddressCAC(hasher, chunk)
case swarm.ChunkTypeSingleOwner:
return transformedAddressSOC(hasher, chunk)
default:
return swarm.ZeroAddress, fmt.Errorf("chunk type [%v] is not valid", chType)
}
}
func transformedAddressCAC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address, error) {
hasher.Reset()
hasher.SetHeader(chunk.Data()[:bmt.SpanSize])
_, err := hasher.Write(chunk.Data()[bmt.SpanSize:])
if err != nil {
return swarm.ZeroAddress, err
}
taddr, err := hasher.Hash(nil)
if err != nil {
return swarm.ZeroAddress, err
}
return swarm.NewAddress(taddr), nil
}
func transformedAddressSOC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address, error) {
// Calculate transformed address from wrapped chunk
sChunk, err := soc.FromChunk(chunk)
if err != nil {
return swarm.ZeroAddress, err
}
taddrCac, err := transformedAddressCAC(hasher, sChunk.WrappedChunk())
if err != nil {
return swarm.ZeroAddress, err
}
// Hash address and transformed address to make transformed address for this SOC
sHasher := swarm.NewHasher()
if _, err := sHasher.Write(chunk.Address().Bytes()); err != nil {
return swarm.ZeroAddress, err
}
if _, err := sHasher.Write(taddrCac.Bytes()); err != nil {
return swarm.ZeroAddress, err
}
return swarm.NewAddress(sHasher.Sum(nil)), nil
}
type SampleStats struct {
TotalDuration time.Duration
TotalIterated int64
IterationDuration time.Duration
SampleInserts int64
NewIgnored int64
InvalidStamp int64
BelowBalanceIgnored int64
HmacrDuration time.Duration
ValidStampDuration time.Duration
BatchesBelowValueDuration time.Duration
RogueChunk int64
ChunkLoadDuration time.Duration
ChunkLoadFailed int64
StampLoadFailed int64
}
func (s *SampleStats)
|
add
|
identifier_name
|
|
sample.go
|
*big.Int,
) (Sample, error) {
g, ctx := errgroup.WithContext(ctx)
chunkC := make(chan reserve.ChunkItem, 64)
allStats := &SampleStats{}
statsLock := sync.Mutex{}
addStats := func(stats SampleStats) {
statsLock.Lock()
allStats.add(stats)
statsLock.Unlock()
}
t := time.Now()
excludedBatchIDs, err := db.batchesBelowValue(minBatchBalance)
if err != nil {
db.logger.Error(err, "get batches below value")
}
allStats.BatchesBelowValueDuration = time.Since(t)
// Phase 1: Iterate chunk addresses
g.Go(func() error {
start := time.Now()
stats := SampleStats{}
defer func() {
stats.IterationDuration = time.Since(start)
close(chunkC)
addStats(stats)
}()
err := db.reserve.IterateChunksItems(db.repo, storageRadius, func(chi reserve.ChunkItem) (bool, error) {
select {
case chunkC <- chi:
stats.TotalIterated++
return false, nil
case <-ctx.Done():
return false, ctx.Err()
}
})
return err
})
// Phase 2: Get the chunk data and calculate transformed hash
sampleItemChan := make(chan SampleItem, 64)
const workers = 6
for i := 0; i < workers; i++ {
g.Go(func() error {
wstat := SampleStats{}
defer func() {
addStats(wstat)
}()
hmacr := hmac.New(swarm.NewHasher, anchor)
for chItem := range chunkC {
// exclude chunks whose batches' balances are below the minimum
if _, found := excludedBatchIDs[string(chItem.BatchID)]; found {
wstat.BelowBalanceIgnored++
continue
}
// Skip chunks if they are not SOC or CAC
if chItem.Type != swarm.ChunkTypeSingleOwner &&
chItem.Type != swarm.ChunkTypeContentAddressed {
wstat.RogueChunk++
continue
}
chunkLoadStart := time.Now()
chunk, err := db.ChunkStore().Get(ctx, chItem.ChunkAddress)
if err != nil {
wstat.ChunkLoadFailed++
db.logger.Debug("failed loading chunk", "chunk_address", chItem.ChunkAddress, "error", err)
continue
}
wstat.ChunkLoadDuration += time.Since(chunkLoadStart)
hmacrStart := time.Now()
hmacr.Reset()
_, err = hmacr.Write(chunk.Data())
if err != nil {
return err
}
taddr := swarm.NewAddress(hmacr.Sum(nil))
wstat.HmacrDuration += time.Since(hmacrStart)
select {
case sampleItemChan <- SampleItem{
TransformedAddress: taddr,
ChunkAddress: chunk.Address(),
ChunkData: chunk.Data(),
Stamp: postage.NewStamp(chItem.BatchID, nil, nil, nil),
}:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
}
go func() {
_ = g.Wait()
close(sampleItemChan)
}()
sampleItems := make([]SampleItem, 0, SampleSize)
// insert places the new item at its correct (sorted) position. If the sample
// size grows beyond what we need, the last item is dropped.
insert := func(item SampleItem) {
added := false
for i, sItem := range sampleItems {
if le(item.TransformedAddress, sItem.TransformedAddress) {
sampleItems = append(sampleItems[:i+1], sampleItems[i:]...)
sampleItems[i] = item
added = true
break
}
}
if len(sampleItems) > SampleSize {
sampleItems = sampleItems[:SampleSize]
}
if len(sampleItems) < SampleSize && !added {
sampleItems = append(sampleItems, item)
}
}
// Phase 3: Assemble the sample. Here we need to assemble only the first SampleSize
// number of items from the results of the 2nd phase.
// In this step stamps are loaded and validated only if the chunk will be added to the sample.
stats := SampleStats{}
for item := range sampleItemChan {
currentMaxAddr := swarm.EmptyAddress
if len(sampleItems) > 0 {
currentMaxAddr = sampleItems[len(sampleItems)-1].TransformedAddress
}
if le(item.TransformedAddress, currentMaxAddr) || len(sampleItems) < SampleSize {
start := time.Now()
stamp, err := chunkstamp.LoadWithBatchID(db.repo.IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID())
if err != nil {
stats.StampLoadFailed++
db.logger.Debug("failed loading stamp", "chunk_address", item.ChunkAddress, "error", err)
continue
}
ch := swarm.NewChunk(item.ChunkAddress, item.ChunkData).WithStamp(stamp)
// check if the timestamp on the postage stamp is not later than the consensus time.
if binary.BigEndian.Uint64(ch.Stamp().Timestamp()) > consensusTime {
stats.NewIgnored++
continue
}
if _, err := db.validStamp(ch); err != nil {
stats.InvalidStamp++
db.logger.Debug("invalid stamp for chunk", "chunk_address", ch.Address(), "error", err)
continue
}
stats.ValidStampDuration += time.Since(start)
item.Stamp = stamp
insert(item)
stats.SampleInserts++
}
}
addStats(stats)
allStats.TotalDuration = time.Since(t)
if err := g.Wait(); err != nil {
db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{}, fmt.Errorf("sampler: failed creating sample: %w", err)
}
db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{Stats: *allStats, Items: sampleItems}, nil
}
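// Usage sketch, assuming a caller with ctx, anchor, storageRadius and minBalance
// already in scope (the variable names are illustrative, not from this file):
// the caller passes the round anchor, the node's storage radius, the consensus
// timestamp and the minimum batch balance, and gets back the SampleSize
// lexicographically smallest transformed addresses plus timing statistics.
//
//	sample, err := db.ReserveSample(ctx, anchor, storageRadius, uint64(time.Now().UnixNano()), minBalance)
//	if err != nil {
//		return err
//	}
//	for _, it := range sample.Items {
//		_ = it.TransformedAddress // items are kept in ascending order
//	}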
// le reports whether a is lexicographically less than b, using byte comparison.
func le(a, b swarm.Address) bool {
return bytes.Compare(a.Bytes(), b.Bytes()) == -1
}
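// Illustrative behaviour, assuming two distinct one-byte addresses: le is a
// strict "less than", so equal addresses compare false in both directions.
//
//	a := swarm.NewAddress([]byte{0x01})
//	b := swarm.NewAddress([]byte{0x02})
//	le(a, b) // true
//	le(b, a) // false
//	le(a, a) // false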
func (db *DB) batchesBelowValue(until *big.Int) (map[string]struct{}, error) {
res := make(map[string]struct{})
if until == nil {
return res, nil
}
err := db.batchstore.Iterate(func(b *postage.Batch) (bool, error) {
if b.Value.Cmp(until) < 0 {
res[string(b.ID)] = struct{}{}
}
return false, nil
})
return res, err
}
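// Consumption sketch, assuming a reserve.ChunkItem named chItem as in
// ReserveSample above: the returned set is keyed by the raw batch ID bytes
// converted to a string.
//
//	excluded, err := db.batchesBelowValue(minBatchBalance)
//	if err != nil {
//		return err
//	}
//	if _, found := excluded[string(chItem.BatchID)]; found {
//		// the chunk's batch balance is below the minimum; skip it
//	}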
func transformedAddress(hasher *bmt.Hasher, chunk swarm.Chunk, chType swarm.ChunkType) (swarm.Address, error) {
switch chType {
case swarm.ChunkTypeContentAddressed:
return transformedAddressCAC(hasher, chunk)
case swarm.ChunkTypeSingleOwner:
return transformedAddressSOC(hasher, chunk)
default:
return swarm.ZeroAddress, fmt.Errorf("chunk type [%v] is is not valid", chType)
}
}
func transformedAddressCAC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address, error) {
hasher.Reset()
hasher.SetHeader(chunk.Data()[:bmt.SpanSize])
_, err := hasher.Write(chunk.Data()[bmt.SpanSize:])
if err != nil {
return swarm.ZeroAddress, err
}
taddr, err := hasher.Hash(nil)
if err != nil {
return swarm.ZeroAddress, err
}
return swarm.NewAddress(taddr), nil
}
func transformedAddressSOC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address, error) {
// Calculate transformed address from wrapped chunk
sChunk, err := soc.FromChunk(chunk)
if err != nil {
return swarm.ZeroAddress, err
}
taddrCac, err := transformedAddressCAC(hasher, sChunk.WrappedChunk())
if err != nil {
return swarm.ZeroAddress, err
}
// Hash the SOC address together with the wrapped chunk's transformed address to derive the transformed address for this SOC
sHasher := swarm.NewHasher()
if _, err := sHasher.Write(chunk.Address().Bytes()); err != nil {
return swarm.ZeroAddress, err
}
if _, err := sHasher.Write(taddrCac.Bytes()); err != nil {
return swarm.ZeroAddress, err
}
return swarm.NewAddress(sHasher.Sum(nil)), nil
}
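// Summary sketch, assuming a *bmt.Hasher named hasher and a SOC chunk ch: for
// a SOC the transformed address is the swarm hash of the SOC address
// concatenated with the transformed address of its wrapped CAC, i.e.
// taddr = H(socAddr || trCAC(wrapped)). Dispatch goes through transformedAddress:
//
//	taddr, err := transformedAddress(hasher, ch, swarm.ChunkTypeSingleOwner)
//	if err != nil {
//		// unsupported chunk type or hashing failure
//	}
//	_ = taddr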
type SampleStats struct {
TotalDuration time.Duration
TotalIterated int64
IterationDuration time.Duration
SampleInserts int64
NewIgnored int64
InvalidStamp int64
BelowBalanceIgnored int64
HmacrDuration time.Duration
ValidStampDuration time.Duration
BatchesBelowValueDuration time.Duration
RogueChunk int64
ChunkLoadDuration time.Duration
|
random_line_split
|
||
sample.go
|
Nodes need to calculate the sample
// as efficiently as possible, and there are time restrictions. The lottery round is a
// time-based round, so nodes participating in the round need to perform this
// calculation within the round limits.
// In order to optimize this we use a simple pipeline pattern:
// Iterate chunk addresses -> Get the chunk data and calculate transformed hash -> Assemble the sample
func (db *DB) ReserveSample(
ctx context.Context,
anchor []byte,
storageRadius uint8,
consensusTime uint64,
minBatchBalance *big.Int,
) (Sample, error) {
g, ctx := errgroup.WithContext(ctx)
chunkC := make(chan reserve.ChunkItem, 64)
allStats := &SampleStats{}
statsLock := sync.Mutex{}
addStats := func(stats SampleStats) {
statsLock.Lock()
allStats.add(stats)
statsLock.Unlock()
}
t := time.Now()
excludedBatchIDs, err := db.batchesBelowValue(minBatchBalance)
if err != nil {
db.logger.Error(err, "get batches below value")
}
allStats.BatchesBelowValueDuration = time.Since(t)
// Phase 1: Iterate chunk addresses
g.Go(func() error {
start := time.Now()
stats := SampleStats{}
defer func() {
stats.IterationDuration = time.Since(start)
close(chunkC)
addStats(stats)
}()
err := db.reserve.IterateChunksItems(db.repo, storageRadius, func(chi reserve.ChunkItem) (bool, error) {
select {
case chunkC <- chi:
stats.TotalIterated++
return false, nil
case <-ctx.Done():
return false, ctx.Err()
}
})
return err
})
// Phase 2: Get the chunk data and calculate transformed hash
sampleItemChan := make(chan SampleItem, 64)
const workers = 6
for i := 0; i < workers; i++ {
g.Go(func() error {
wstat := SampleStats{}
defer func() {
addStats(wstat)
}()
hmacr := hmac.New(swarm.NewHasher, anchor)
for chItem := range chunkC {
// exclude chunks whose batch balances are below the minimum
if _, found := excludedBatchIDs[string(chItem.BatchID)]; found {
wstat.BelowBalanceIgnored++
continue
}
// Skip chunks if they are not SOC or CAC
if chItem.Type != swarm.ChunkTypeSingleOwner &&
chItem.Type != swarm.ChunkTypeContentAddressed {
wstat.RogueChunk++
continue
}
chunkLoadStart := time.Now()
chunk, err := db.ChunkStore().Get(ctx, chItem.ChunkAddress)
if err != nil {
wstat.ChunkLoadFailed++
db.logger.Debug("failed loading chunk", "chunk_address", chItem.ChunkAddress, "error", err)
continue
}
wstat.ChunkLoadDuration += time.Since(chunkLoadStart)
hmacrStart := time.Now()
hmacr.Reset()
_, err = hmacr.Write(chunk.Data())
if err != nil {
return err
}
taddr := swarm.NewAddress(hmacr.Sum(nil))
wstat.HmacrDuration += time.Since(hmacrStart)
select {
case sampleItemChan <- SampleItem{
TransformedAddress: taddr,
ChunkAddress: chunk.Address(),
ChunkData: chunk.Data(),
Stamp: postage.NewStamp(chItem.BatchID, nil, nil, nil),
}:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
}
go func() {
_ = g.Wait()
close(sampleItemChan)
}()
sampleItems := make([]SampleItem, 0, SampleSize)
// insert places the new item at its correct position in the sorted sample. If the
// sample grows beyond the required size, the last item is dropped.
insert := func(item SampleItem) {
added := false
for i, sItem := range sampleItems {
if le(item.TransformedAddress, sItem.TransformedAddress) {
sampleItems = append(sampleItems[:i+1], sampleItems[i:]...)
sampleItems[i] = item
added = true
break
}
}
if len(sampleItems) > SampleSize {
sampleItems = sampleItems[:SampleSize]
}
if len(sampleItems) < SampleSize && !added {
sampleItems = append(sampleItems, item)
}
}
// Phase 3: Assemble the sample. Only the first SampleSize items from the
// results of the second phase need to be kept.
// In this step stamps are loaded and validated only if the chunk will be added to the sample.
stats := SampleStats{}
for item := range sampleItemChan {
currentMaxAddr := swarm.EmptyAddress
if len(sampleItems) > 0 {
currentMaxAddr = sampleItems[len(sampleItems)-1].TransformedAddress
}
if le(item.TransformedAddress, currentMaxAddr) || len(sampleItems) < SampleSize {
start := time.Now()
stamp, err := chunkstamp.LoadWithBatchID(db.repo.IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID())
if err != nil {
stats.StampLoadFailed++
db.logger.Debug("failed loading stamp", "chunk_address", item.ChunkAddress, "error", err)
continue
}
ch := swarm.NewChunk(item.ChunkAddress, item.ChunkData).WithStamp(stamp)
// check if the timestamp on the postage stamp is not later than the consensus time.
if binary.BigEndian.Uint64(ch.Stamp().Timestamp()) > consensusTime {
stats.NewIgnored++
continue
}
if _, err := db.validStamp(ch); err != nil {
stats.InvalidStamp++
db.logger.Debug("invalid stamp for chunk", "chunk_address", ch.Address(), "error", err)
continue
}
stats.ValidStampDuration += time.Since(start)
item.Stamp = stamp
insert(item)
stats.SampleInserts++
}
}
addStats(stats)
allStats.TotalDuration = time.Since(t)
if err := g.Wait(); err != nil {
db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{}, fmt.Errorf("sampler: failed creating sample: %w", err)
}
db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{Stats: *allStats, Items: sampleItems}, nil
}
// le reports whether a is lexicographically less than b, using byte comparison.
func le(a, b swarm.Address) bool {
return bytes.Compare(a.Bytes(), b.Bytes()) == -1
}
func (db *DB) batchesBelowValue(until *big.Int) (map[string]struct{}, error) {
res := make(map[string]struct{})
if until == nil {
return res, nil
}
err := db.batchstore.Iterate(func(b *postage.Batch) (bool, error) {
if b.Value.Cmp(until) < 0 {
res[string(b.ID)] = struct{}{}
}
return false, nil
})
return res, err
}
func transformedAddress(hasher *bmt.Hasher, chunk swarm.Chunk, chType swarm.ChunkType) (swarm.Address, error) {
switch chType {
case swarm.ChunkTypeContentAddressed:
return transformedAddressCAC(hasher, chunk)
case swarm.ChunkTypeSingleOwner:
return transformedAddressSOC(hasher, chunk)
default:
return swarm.ZeroAddress, fmt.Errorf("chunk type [%v] is is not valid", chType)
}
}
func transformedAddressCAC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address, error) {
hasher.Reset()
hasher.SetHeader(chunk.Data()[:bmt.SpanSize])
_, err := hasher.Write(chunk.Data()[bmt.SpanSize:])
if err != nil {
return swarm.ZeroAddress, err
}
taddr, err := hasher.Hash(nil)
if err != nil {
return swarm.ZeroAddress, err
}
return swarm.NewAddress(taddr), nil
}
func transformedAddressSOC(hasher *bmt.Hasher, chunk swarm.Chunk) (swarm.Address, error)
|
{
// Calculate transformed address from wrapped chunk
sChunk, err := soc.FromChunk(chunk)
if err != nil {
return swarm.ZeroAddress, err
}
taddrCac, err := transformedAddressCAC(hasher, sChunk.WrappedChunk())
if err != nil {
return swarm.ZeroAddress, err
}
// Hash the SOC address together with the wrapped chunk's transformed address to derive the transformed address for this SOC
sHasher := swarm.NewHasher()
if _, err := sHasher.Write(chunk.Address().Bytes()); err != nil {
return swarm.ZeroAddress, err
}
if _, err := sHasher.Write(taddrCac.Bytes()); err != nil {
return swarm.ZeroAddress, err
}
|
identifier_body
|
|
sample.go
|
byte) Sample {
t.Helper()
hasher := bmt.NewTrHasher(anchor)
items := make([]SampleItem, SampleSize)
for i := 0; i < SampleSize; i++ {
ch := chunk.GenerateTestRandomChunk()
tr, err := transformedAddress(hasher, ch, swarm.ChunkTypeContentAddressed)
if err != nil {
t.Fatal(err)
}
items[i] = SampleItem{
TransformedAddress: tr,
ChunkAddress: ch.Address(),
ChunkData: ch.Data(),
Stamp: newStamp(ch.Stamp()),
}
}
sort.Slice(items, func(i, j int) bool {
return items[i].TransformedAddress.Compare(items[j].TransformedAddress) == -1
})
return Sample{Items: items}
}
func newStamp(s swarm.Stamp) *postage.Stamp {
return postage.NewStamp(s.BatchID(), s.Index(), s.Timestamp(), s.Sig())
}
// ReserveSample generates the sample of reserve storage of a node required for the
// storage incentives agent to participate in the lottery round. In order to generate
// this sample we need to iterate through all the chunks in the node's reserve and
// calculate the transformed hashes of all the chunks using the anchor as the salt.
// In order to generate the transformed hashes, we will use the std hmac keyed-hash
// implementation by using the anchor as the key. Nodes need to calculate the sample
// as efficiently as possible, and there are time restrictions. The lottery round is a
// time-based round, so nodes participating in the round need to perform this
// calculation within the round limits.
// In order to optimize this we use a simple pipeline pattern:
// Iterate chunk addresses -> Get the chunk data and calculate transformed hash -> Assemble the sample
func (db *DB) ReserveSample(
ctx context.Context,
anchor []byte,
storageRadius uint8,
consensusTime uint64,
minBatchBalance *big.Int,
) (Sample, error) {
g, ctx := errgroup.WithContext(ctx)
chunkC := make(chan reserve.ChunkItem, 64)
allStats := &SampleStats{}
statsLock := sync.Mutex{}
addStats := func(stats SampleStats) {
statsLock.Lock()
allStats.add(stats)
statsLock.Unlock()
}
t := time.Now()
excludedBatchIDs, err := db.batchesBelowValue(minBatchBalance)
if err != nil {
db.logger.Error(err, "get batches below value")
}
allStats.BatchesBelowValueDuration = time.Since(t)
// Phase 1: Iterate chunk addresses
g.Go(func() error {
start := time.Now()
stats := SampleStats{}
defer func() {
stats.IterationDuration = time.Since(start)
close(chunkC)
addStats(stats)
}()
err := db.reserve.IterateChunksItems(db.repo, storageRadius, func(chi reserve.ChunkItem) (bool, error) {
select {
case chunkC <- chi:
stats.TotalIterated++
return false, nil
case <-ctx.Done():
return false, ctx.Err()
}
})
return err
})
// Phase 2: Get the chunk data and calculate transformed hash
sampleItemChan := make(chan SampleItem, 64)
const workers = 6
for i := 0; i < workers; i++ {
g.Go(func() error {
wstat := SampleStats{}
defer func() {
addStats(wstat)
}()
hmacr := hmac.New(swarm.NewHasher, anchor)
for chItem := range chunkC {
// exclude chunks whose batch balances are below the minimum
if _, found := excludedBatchIDs[string(chItem.BatchID)]; found
|
// Skip chunks if they are not SOC or CAC
if chItem.Type != swarm.ChunkTypeSingleOwner &&
chItem.Type != swarm.ChunkTypeContentAddressed {
wstat.RogueChunk++
continue
}
chunkLoadStart := time.Now()
chunk, err := db.ChunkStore().Get(ctx, chItem.ChunkAddress)
if err != nil {
wstat.ChunkLoadFailed++
db.logger.Debug("failed loading chunk", "chunk_address", chItem.ChunkAddress, "error", err)
continue
}
wstat.ChunkLoadDuration += time.Since(chunkLoadStart)
hmacrStart := time.Now()
hmacr.Reset()
_, err = hmacr.Write(chunk.Data())
if err != nil {
return err
}
taddr := swarm.NewAddress(hmacr.Sum(nil))
wstat.HmacrDuration += time.Since(hmacrStart)
select {
case sampleItemChan <- SampleItem{
TransformedAddress: taddr,
ChunkAddress: chunk.Address(),
ChunkData: chunk.Data(),
Stamp: postage.NewStamp(chItem.BatchID, nil, nil, nil),
}:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
}
go func() {
_ = g.Wait()
close(sampleItemChan)
}()
sampleItems := make([]SampleItem, 0, SampleSize)
// insert places the new item at its correct position in the sorted sample. If the
// sample grows beyond the required size, the last item is dropped.
insert := func(item SampleItem) {
added := false
for i, sItem := range sampleItems {
if le(item.TransformedAddress, sItem.TransformedAddress) {
sampleItems = append(sampleItems[:i+1], sampleItems[i:]...)
sampleItems[i] = item
added = true
break
}
}
if len(sampleItems) > SampleSize {
sampleItems = sampleItems[:SampleSize]
}
if len(sampleItems) < SampleSize && !added {
sampleItems = append(sampleItems, item)
}
}
// Phase 3: Assemble the sample. Only the first SampleSize items from the
// results of the second phase need to be kept.
// In this step stamps are loaded and validated only if the chunk will be added to the sample.
stats := SampleStats{}
for item := range sampleItemChan {
currentMaxAddr := swarm.EmptyAddress
if len(sampleItems) > 0 {
currentMaxAddr = sampleItems[len(sampleItems)-1].TransformedAddress
}
if le(item.TransformedAddress, currentMaxAddr) || len(sampleItems) < SampleSize {
start := time.Now()
stamp, err := chunkstamp.LoadWithBatchID(db.repo.IndexStore(), "reserve", item.ChunkAddress, item.Stamp.BatchID())
if err != nil {
stats.StampLoadFailed++
db.logger.Debug("failed loading stamp", "chunk_address", item.ChunkAddress, "error", err)
continue
}
ch := swarm.NewChunk(item.ChunkAddress, item.ChunkData).WithStamp(stamp)
// check if the timestamp on the postage stamp is not later than the consensus time.
if binary.BigEndian.Uint64(ch.Stamp().Timestamp()) > consensusTime {
stats.NewIgnored++
continue
}
if _, err := db.validStamp(ch); err != nil {
stats.InvalidStamp++
db.logger.Debug("invalid stamp for chunk", "chunk_address", ch.Address(), "error", err)
continue
}
stats.ValidStampDuration += time.Since(start)
item.Stamp = stamp
insert(item)
stats.SampleInserts++
}
}
addStats(stats)
allStats.TotalDuration = time.Since(t)
if err := g.Wait(); err != nil {
db.logger.Info("reserve sampler finished with error", "err", err, "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{}, fmt.Errorf("sampler: failed creating sample: %w", err)
}
db.logger.Info("reserve sampler finished", "duration", time.Since(t), "storage_radius", storageRadius, "consensus_time_ns", consensusTime, "stats", fmt.Sprintf("%+v", allStats))
return Sample{Stats: *allStats, Items: sampleItems}, nil
}
// le reports whether a is lexicographically less than b, using byte comparison.
func le(a, b swarm.Address) bool {
return bytes.Compare(a.Bytes(), b.Bytes()) == -1
}
func (db *DB) batchesBelowValue(until *big.Int) (map[string]struct{}, error) {
res := make(map[string]struct{})
if until == nil {
return res, nil
}
err := db.batchstore.Iterate(func(b *postage.Batch) (bool, error) {
if b.Value.Cmp(until) < 0 {
res[string(b.ID)] = struct{}{}
}
return false, nil
})
return res, err
}
func transformedAddress(hasher *bmt.Hasher, chunk swarm.Chunk, chType swarm.ChunkType) (swarm.Address, error) {
switch chType {
case swarm.ChunkTypeContentAddressed:
return transformedAddressCAC(hasher, chunk)
case swarm.ChunkTypeSingleOwner:
return transformedAddressSOC(hasher, chunk)
|
{
wstat.BelowBalanceIgnored++
continue
}
|
conditional_block
|
voxel_detection.py
|
cv
import numpy as np
import torch
import torch.nn as nn
from mmcv.parallel import collate, scatter
from mmdet3d.core.bbox import get_box_type
from mmdet3d.datasets.pipelines import Compose
from torch.utils.data import DataLoader, Dataset
from mmdeploy.codebase.base import BaseTask
from mmdeploy.codebase.mmdet3d.deploy.mmdetection3d import MMDET3D_TASK
from mmdeploy.utils import Task, get_root_logger, load_config
from .voxel_detection_model import VoxelDetectionModel
@MMDET3D_TASK.register_module(Task.VOXEL_DETECTION.value)
class VoxelDetection(BaseTask):
|
def init_pytorch_model(self,
model_checkpoint: Optional[str] = None,
cfg_options: Optional[Dict] = None,
**kwargs) -> torch.nn.Module:
"""Initialize torch model.
Args:
model_checkpoint (str): The checkpoint file of torch model,
defaults to `None`.
cfg_options (dict): Optional config key-pair parameters.
Returns:
nn.Module: An initialized torch model generated by other OpenMMLab
codebases.
"""
from mmdet3d.apis import init_model
device = self.device
model = init_model(self.model_cfg, model_checkpoint, device)
return model.eval()
def create_input(self, pcd: str, *args) -> Tuple[Dict, torch.Tensor]:
"""Create input for detector.
Args:
pcd (str): Input pcd file path.
Returns:
tuple: (data, input), meta information for the input pcd
and model input.
"""
data = VoxelDetection.read_pcd_file(pcd, self.model_cfg, self.device)
voxels, num_points, coors = VoxelDetectionModel.voxelize(
self.model_cfg, data['points'][0])
return data, (voxels, num_points, coors)
def visualize(self,
model: torch.nn.Module,
image: str,
result: list,
output_file: str,
window_name: str,
show_result: bool = False,
score_thr: float = 0.3):
"""Visualize predictions of a model.
Args:
model (nn.Module): Input model.
image (str): Pcd file to draw predictions on.
result (list): A list of predictions.
output_file (str): Output file to save result.
window_name (str): The name of visualization window. Defaults to
an empty string.
show_result (bool): Whether to show result in windows, defaults
to `False`.
score_thr (float): The score threshold to display the bbox.
Defaults to 0.3.
"""
from mmdet3d.apis import show_result_meshlab
data = VoxelDetection.read_pcd_file(image, self.model_cfg, self.device)
show_result_meshlab(
data,
result,
output_file,
score_thr,
show=show_result,
snapshot=1 - show_result,
task='det')
@staticmethod
def read_pcd_file(pcd: str, model_cfg: Union[str, mmcv.Config],
device: str) -> Dict:
"""Read data from pcd file and run test pipeline.
Args:
pcd (str): Pcd file path.
model_cfg (str | mmcv.Config): The model config.
device (str): A string specifying device type.
Returns:
dict: meta information for the input pcd.
"""
if isinstance(pcd, (list, tuple)):
pcd = pcd[0]
model_cfg = load_config(model_cfg)[0]
test_pipeline = Compose(model_cfg.data.test.pipeline)
box_type_3d, box_mode_3d = get_box_type(
model_cfg.data.test.box_type_3d)
data = dict(
pts_filename=pcd,
box_type_3d=box_type_3d,
box_mode_3d=box_mode_3d,
# for ScanNet demo we need axis_align_matrix
ann_info=dict(axis_align_matrix=np.eye(4)),
sweeps=[],
# set timestamp = 0
timestamp=[0],
img_fields=[],
bbox3d_fields=[],
pts_mask_fields=[],
pts_seg_fields=[],
bbox_fields=[],
mask_fields=[],
seg_fields=[])
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
data['img_metas'] = [
img_metas.data[0] for img_metas in data['img_metas']
]
data['points'] = [point.data[0] for point in data['points']]
if device != 'cpu':
data = scatter(data, [device])[0]
return data
@staticmethod
def run_inference(model: nn.Module,
model_inputs: Dict[str, torch.Tensor]) -> List:
"""Run inference once for a object detection model of mmdet3d.
Args:
model (nn.Module): Input model.
model_inputs (dict): A dict containing model inputs tensor and
meta info.
Returns:
list: The predictions of model inference.
"""
result = model(
return_loss=False,
points=model_inputs['points'],
img_metas=model_inputs['img_metas'])
return [result]
@staticmethod
def evaluate_outputs(model_cfg,
outputs: Sequence,
dataset: Dataset,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False,
log_file: Optional[str] = None):
if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
dataset.format_results(outputs, **kwargs)
if metrics:
eval_kwargs = model_cfg.get('evaluation', {}).copy()
# hard-coded way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=metrics, **kwargs))
dataset.evaluate(outputs, **eval_kwargs)
def get_model_name(self) -> str:
"""Get the model name.
Return:
str: the name of the model.
"""
raise NotImplementedError
def get_tensor_from_input(self, input_data: Dict[str, Any],
**kwargs) -> torch.Tensor:
"""Get input tensor from input data.
Args:
input_data (dict): Input data containing meta info and image
tensor.
Returns:
torch.Tensor: An image in `Tensor`.
"""
raise NotImplementedError
def get_partition_cfg(partition_type: str, **kwargs) -> Dict:
"""Get a certain partition config for mmdet.
Args:
partition_type (str): A string specifying partition type.
Returns:
dict: A dictionary of partition config.
"""
raise NotImplementedError
def get_postprocess(self) -> Dict:
"""Get the postprocess information for SDK.
Return:
dict: Composed of the postprocess information.
"""
raise NotImplementedError
def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
Return:
dict: Composed of the preprocess information.
"""
raise NotImplementedError
def single_gpu_test(self,
model: nn.Module,
data_loader: DataLoader,
show: bool = False,
out_dir: Optional[str] = None,
**kwargs) -> List:
"""Run test with single gpu.
Args:
model (nn.Module): Input model from nn.Module.
data_loader (DataLoader): PyTorch data loader.
show (bool): Specifying whether to show plotted results. Defaults
to `False`.
out_dir (str): A directory to save results, defaults to `None`.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(data['points'][0].data,
data['img_metas'][0].data, False)
if show:
# Visualize the results of the MMDetection3D model
# 'show_result' is the MMDetection3D visualization API
if out_dir is None:
model.module.show_result(
data,
result,
out_dir='',
file_name='',
show=show,
snapshot=False,
score_thr=0.
|
def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
device: str):
super().__init__(model_cfg, deploy_cfg, device)
def init_backend_model(self,
model_files: Sequence[str] = None,
**kwargs) -> torch.nn.Module:
"""Initialize backend model.
Args:
model_files (Sequence[str]): Input model files.
Returns:
nn.Module: An initialized backend model.
"""
from .voxel_detection_model import build_voxel_detection_model
model = build_voxel_detection_model(
model_files, self.model_cfg, self.deploy_cfg, device=self.device)
return model
|
identifier_body
|
voxel_detection.py
|
cv
import numpy as np
import torch
import torch.nn as nn
from mmcv.parallel import collate, scatter
from mmdet3d.core.bbox import get_box_type
from mmdet3d.datasets.pipelines import Compose
from torch.utils.data import DataLoader, Dataset
from mmdeploy.codebase.base import BaseTask
from mmdeploy.codebase.mmdet3d.deploy.mmdetection3d import MMDET3D_TASK
from mmdeploy.utils import Task, get_root_logger, load_config
from .voxel_detection_model import VoxelDetectionModel
@MMDET3D_TASK.register_module(Task.VOXEL_DETECTION.value)
class VoxelDetection(BaseTask):
def
|
(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
device: str):
super().__init__(model_cfg, deploy_cfg, device)
def init_backend_model(self,
model_files: Sequence[str] = None,
**kwargs) -> torch.nn.Module:
"""Initialize backend model.
Args:
model_files (Sequence[str]): Input model files.
Returns:
nn.Module: An initialized backend model.
"""
from .voxel_detection_model import build_voxel_detection_model
model = build_voxel_detection_model(
model_files, self.model_cfg, self.deploy_cfg, device=self.device)
return model
def init_pytorch_model(self,
model_checkpoint: Optional[str] = None,
cfg_options: Optional[Dict] = None,
**kwargs) -> torch.nn.Module:
"""Initialize torch model.
Args:
model_checkpoint (str): The checkpoint file of torch model,
defaults to `None`.
cfg_options (dict): Optional config key-pair parameters.
Returns:
nn.Module: An initialized torch model generated by other OpenMMLab
codebases.
"""
from mmdet3d.apis import init_model
device = self.device
model = init_model(self.model_cfg, model_checkpoint, device)
return model.eval()
def create_input(self, pcd: str, *args) -> Tuple[Dict, torch.Tensor]:
"""Create input for detector.
Args:
pcd (str): Input pcd file path.
Returns:
tuple: (data, input), meta information for the input pcd
and model input.
"""
data = VoxelDetection.read_pcd_file(pcd, self.model_cfg, self.device)
voxels, num_points, coors = VoxelDetectionModel.voxelize(
self.model_cfg, data['points'][0])
return data, (voxels, num_points, coors)
def visualize(self,
model: torch.nn.Module,
image: str,
result: list,
output_file: str,
window_name: str,
show_result: bool = False,
score_thr: float = 0.3):
"""Visualize predictions of a model.
Args:
model (nn.Module): Input model.
image (str): Pcd file to draw predictions on.
result (list): A list of predictions.
output_file (str): Output file to save result.
window_name (str): The name of visualization window. Defaults to
an empty string.
show_result (bool): Whether to show result in windows, defaults
to `False`.
score_thr (float): The score threshold to display the bbox.
Defaults to 0.3.
"""
from mmdet3d.apis import show_result_meshlab
data = VoxelDetection.read_pcd_file(image, self.model_cfg, self.device)
show_result_meshlab(
data,
result,
output_file,
score_thr,
show=show_result,
snapshot=1 - show_result,
task='det')
@staticmethod
def read_pcd_file(pcd: str, model_cfg: Union[str, mmcv.Config],
device: str) -> Dict:
"""Read data from pcd file and run test pipeline.
Args:
pcd (str): Pcd file path.
model_cfg (str | mmcv.Config): The model config.
device (str): A string specifying device type.
Returns:
dict: meta information for the input pcd.
"""
if isinstance(pcd, (list, tuple)):
pcd = pcd[0]
model_cfg = load_config(model_cfg)[0]
test_pipeline = Compose(model_cfg.data.test.pipeline)
box_type_3d, box_mode_3d = get_box_type(
model_cfg.data.test.box_type_3d)
data = dict(
pts_filename=pcd,
box_type_3d=box_type_3d,
box_mode_3d=box_mode_3d,
# for ScanNet demo we need axis_align_matrix
ann_info=dict(axis_align_matrix=np.eye(4)),
sweeps=[],
# set timestamp = 0
timestamp=[0],
img_fields=[],
bbox3d_fields=[],
pts_mask_fields=[],
pts_seg_fields=[],
bbox_fields=[],
mask_fields=[],
seg_fields=[])
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
data['img_metas'] = [
img_metas.data[0] for img_metas in data['img_metas']
]
data['points'] = [point.data[0] for point in data['points']]
if device != 'cpu':
data = scatter(data, [device])[0]
return data
@staticmethod
def run_inference(model: nn.Module,
model_inputs: Dict[str, torch.Tensor]) -> List:
"""Run inference once for a object detection model of mmdet3d.
Args:
model (nn.Module): Input model.
model_inputs (dict): A dict containing model inputs tensor and
meta info.
Returns:
list: The predictions of model inference.
"""
result = model(
return_loss=False,
points=model_inputs['points'],
img_metas=model_inputs['img_metas'])
return [result]
@staticmethod
def evaluate_outputs(model_cfg,
outputs: Sequence,
dataset: Dataset,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False,
log_file: Optional[str] = None):
if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
dataset.format_results(outputs, **kwargs)
if metrics:
eval_kwargs = model_cfg.get('evaluation', {}).copy()
# hard-coded way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=metrics, **kwargs))
dataset.evaluate(outputs, **eval_kwargs)
def get_model_name(self) -> str:
"""Get the model name.
Return:
str: the name of the model.
"""
raise NotImplementedError
def get_tensor_from_input(self, input_data: Dict[str, Any],
**kwargs) -> torch.Tensor:
"""Get input tensor from input data.
Args:
input_data (dict): Input data containing meta info and image
tensor.
Returns:
torch.Tensor: An image in `Tensor`.
"""
raise NotImplementedError
def get_partition_cfg(partition_type: str, **kwargs) -> Dict:
"""Get a certain partition config for mmdet.
Args:
partition_type (str): A string specifying partition type.
Returns:
dict: A dictionary of partition config.
"""
raise NotImplementedError
def get_postprocess(self) -> Dict:
"""Get the postprocess information for SDK.
Return:
dict: Composed of the postprocess information.
"""
raise NotImplementedError
def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
Return:
dict: Composed of the preprocess information.
"""
raise NotImplementedError
def single_gpu_test(self,
model: nn.Module,
data_loader: DataLoader,
show: bool = False,
out_dir: Optional[str] = None,
**kwargs) -> List:
"""Run test with single gpu.
Args:
model (nn.Module): Input model from nn.Module.
data_loader (DataLoader): PyTorch data loader.
show (bool): Specifying whether to show plotted results. Defaults
to `False`.
out_dir (str): A directory to save results, defaults to `None`.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(data['points'][0].data,
data['img_metas'][0].data, False)
if show:
# Visualize the results of the MMDetection3D model
# 'show_result' is the MMDetection3D visualization API
if out_dir is None:
model.module.show_result(
data,
result,
out_dir='',
file_name='',
show=show,
snapshot=False,
score_thr=0
|
__init__
|
identifier_name
|
voxel_detection.py
|
import numpy as np
import torch
import torch.nn as nn
from mmcv.parallel import collate, scatter
from mmdet3d.core.bbox import get_box_type
from mmdet3d.datasets.pipelines import Compose
from torch.utils.data import DataLoader, Dataset
from mmdeploy.codebase.base import BaseTask
from mmdeploy.codebase.mmdet3d.deploy.mmdetection3d import MMDET3D_TASK
from mmdeploy.utils import Task, get_root_logger, load_config
from .voxel_detection_model import VoxelDetectionModel
@MMDET3D_TASK.register_module(Task.VOXEL_DETECTION.value)
class VoxelDetection(BaseTask):
def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
device: str):
super().__init__(model_cfg, deploy_cfg, device)
def init_backend_model(self,
model_files: Sequence[str] = None,
**kwargs) -> torch.nn.Module:
"""Initialize backend model.
Args:
model_files (Sequence[str]): Input model files.
Returns:
nn.Module: An initialized backend model.
"""
from .voxel_detection_model import build_voxel_detection_model
model = build_voxel_detection_model(
model_files, self.model_cfg, self.deploy_cfg, device=self.device)
return model
def init_pytorch_model(self,
model_checkpoint: Optional[str] = None,
cfg_options: Optional[Dict] = None,
**kwargs) -> torch.nn.Module:
"""Initialize torch model.
Args:
model_checkpoint (str): The checkpoint file of torch model,
defaults to `None`.
cfg_options (dict): Optional config key-pair parameters.
Returns:
nn.Module: An initialized torch model generated by other OpenMMLab
codebases.
"""
from mmdet3d.apis import init_model
device = self.device
model = init_model(self.model_cfg, model_checkpoint, device)
return model.eval()
def create_input(self, pcd: str, *args) -> Tuple[Dict, torch.Tensor]:
"""Create input for detector.
Args:
pcd (str): Input pcd file path.
Returns:
tuple: (data, input), meta information for the input pcd
and model input.
"""
data = VoxelDetection.read_pcd_file(pcd, self.model_cfg, self.device)
voxels, num_points, coors = VoxelDetectionModel.voxelize(
self.model_cfg, data['points'][0])
return data, (voxels, num_points, coors)
def visualize(self,
model: torch.nn.Module,
image: str,
result: list,
output_file: str,
window_name: str,
show_result: bool = False,
score_thr: float = 0.3):
"""Visualize predictions of a model.
Args:
model (nn.Module): Input model.
image (str): Pcd file to draw predictions on.
result (list): A list of predictions.
output_file (str): Output file to save result.
window_name (str): The name of visualization window. Defaults to
an empty string.
show_result (bool): Whether to show result in windows, defaults
to `False`.
score_thr (float): The score threshold to display the bbox.
Defaults to 0.3.
"""
from mmdet3d.apis import show_result_meshlab
data = VoxelDetection.read_pcd_file(image, self.model_cfg, self.device)
show_result_meshlab(
data,
result,
output_file,
score_thr,
show=show_result,
snapshot=1 - show_result,
task='det')
@staticmethod
def read_pcd_file(pcd: str, model_cfg: Union[str, mmcv.Config],
device: str) -> Dict:
"""Read data from pcd file and run test pipeline.
Args:
pcd (str): Pcd file path.
model_cfg (str | mmcv.Config): The model config.
device (str): A string specifying device type.
Returns:
dict: meta information for the input pcd.
"""
if isinstance(pcd, (list, tuple)):
pcd = pcd[0]
model_cfg = load_config(model_cfg)[0]
test_pipeline = Compose(model_cfg.data.test.pipeline)
box_type_3d, box_mode_3d = get_box_type(
model_cfg.data.test.box_type_3d)
data = dict(
pts_filename=pcd,
box_type_3d=box_type_3d,
box_mode_3d=box_mode_3d,
# for ScanNet demo we need axis_align_matrix
ann_info=dict(axis_align_matrix=np.eye(4)),
sweeps=[],
# set timestamp = 0
timestamp=[0],
img_fields=[],
bbox3d_fields=[],
pts_mask_fields=[],
pts_seg_fields=[],
bbox_fields=[],
mask_fields=[],
seg_fields=[])
data = test_pipeline(data)
data = collate([data], samples_per_gpu=1)
data['img_metas'] = [
img_metas.data[0] for img_metas in data['img_metas']
]
data['points'] = [point.data[0] for point in data['points']]
if device != 'cpu':
data = scatter(data, [device])[0]
return data
@staticmethod
def run_inference(model: nn.Module,
model_inputs: Dict[str, torch.Tensor]) -> List:
"""Run inference once for a object detection model of mmdet3d.
Args:
model (nn.Module): Input model.
model_inputs (dict): A dict containing model inputs tensor and
meta info.
Returns:
list: The predictions of model inference.
"""
result = model(
return_loss=False,
points=model_inputs['points'],
img_metas=model_inputs['img_metas'])
return [result]
@staticmethod
def evaluate_outputs(model_cfg,
outputs: Sequence,
dataset: Dataset,
metrics: Optional[str] = None,
out: Optional[str] = None,
metric_options: Optional[dict] = None,
format_only: bool = False,
log_file: Optional[str] = None):
if out:
logger = get_root_logger()
logger.info(f'\nwriting results to {out}')
mmcv.dump(outputs, out)
kwargs = {} if metric_options is None else metric_options
if format_only:
dataset.format_results(outputs, **kwargs)
if metrics:
eval_kwargs = model_cfg.get('evaluation', {}).copy()
# hard-coded way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=metrics, **kwargs))
dataset.evaluate(outputs, **eval_kwargs)
def get_model_name(self) -> str:
"""Get the model name.
Return:
str: the name of the model.
"""
raise NotImplementedError
def get_tensor_from_input(self, input_data: Dict[str, Any],
**kwargs) -> torch.Tensor:
"""Get input tensor from input data.
Args:
input_data (dict): Input data containing meta info and image
tensor.
Returns:
torch.Tensor: An image in `Tensor`.
"""
raise NotImplementedError
def get_partition_cfg(partition_type: str, **kwargs) -> Dict:
"""Get a certain partition config for mmdet.
Args:
partition_type (str): A string specifying partition type.
Returns:
dict: A dictionary of partition config.
"""
raise NotImplementedError
def get_postprocess(self) -> Dict:
"""Get the postprocess information for SDK.
Return:
dict: Composed of the postprocess information.
"""
raise NotImplementedError
def get_preprocess(self) -> Dict:
"""Get the preprocess information for SDK.
Return:
dict: Composed of the preprocess information.
"""
raise NotImplementedError
def single_gpu_test(self,
model: nn.Module,
data_loader: DataLoader,
show: bool = False,
out_dir: Optional[str] = None,
**kwargs) -> List:
"""Run test with single gpu.
Args:
model (nn.Module): Input model from nn.Module.
data_loader (DataLoader): PyTorch data loader.
show (bool): Specifying whether to show plotted results. Defaults
to `False`.
out_dir (str): A directory to save results, defaults to `None`.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(data['points'][0].data,
data['img_metas'][0].data, False)
if show:
# Visualize the results of the MMDetection3D model
# 'show_result' is the MMDetection3D visualization API
if out_dir is None:
|
model.module.show_result(
data,
result,
out_dir='',
file_name='',
show=show,
snapshot=False,
score_thr=0.3)
|
conditional_block
|
|
bnj.go
|
.Mid {
if _, err = s.dao.UpdateSubMid(c, tp, v.Cid, v.Mid); err != nil {
return
}
}
}
return
}
// bnjDmCount adds a hidden junk (laji) danmu to bump the bnj danmu count
func (s *Service) bnjDmCount(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
var (
dmid int64
pages []*api.Page
chosen *api.Page
choseSub *model.Subject
)
if _, ok := s.bnjSubAids[sub.Pid]; !ok {
return
}
if pages, err = s.arcRPC.Page3(c, &arcMdl.ArgAid2{
Aid: s.bnjAid,
RealIP: metadata.String(c, metadata.RemoteIP),
}); err != nil {
log.Error("bnjDmCount Page3(aid:%v) error(%v)", sub.Pid, err)
return
}
if len(pages) <= 0 {
return
}
idx := time.Now().Unix() % int64(len(pages))
if chosen = pages[idx]; chosen == nil {
return
}
if choseSub, err = s.subject(c, model.SubTypeVideo, chosen.Cid); err != nil {
return
}
if dmid, err = s.genDMID(c); err != nil {
log.Error("bnjDmCount genDMID() error(%v)", err)
return
}
forkDM := &model.DM{
ID: dmid,
Type: model.SubTypeVideo,
Oid: chosen.Cid,
Mid: dm.Mid,
Progress: int32((chosen.Duration + 1) * 1000),
Pool: dm.Pool,
State: model.StateAdminDelete,
Ctime: dm.Ctime,
Mtime: dm.Mtime,
Content: &model.Content{
ID: dmid,
FontSize: dm.Content.FontSize,
Color: dm.Content.Color,
Mode: dm.Content.Mode,
IP: dm.Content.IP,
Plat: dm.Content.Plat,
Msg: dm.Content.Msg,
Ctime: dm.Content.Ctime,
Mtime: dm.Content.Mtime,
},
}
if dm.Pool == model.PoolSpecial {
forkDM.ContentSpe = &model.ContentSpecial{
ID: dmid,
Msg: dm.ContentSpe.Msg,
Ctime: dm.ContentSpe.Ctime,
Mtime: dm.ContentSpe.Mtime,
}
}
if err = s.bnjAddDM(c, choseSub, forkDM); err != nil {
return
}
return
}
// bnjAddDM add dm index and content to db by transaction.
func (s *Service) bnjAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
if dm.State != model.StateAdminDelete {
return
}
tx, err := s.dao.BeginTran(c)
if err != nil {
return
}
// special dm
if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
return tx.Rollback()
}
}
if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, 0, sub.Childpool); err != nil {
return tx.Rollback()
}
return tx.Commit()
}
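// Usage sketch, reusing c, choseSub and forkDM as in bnjDmCount above:
// bnjAddDM only persists danmu marked StateAdminDelete (the hidden fork copy);
// content, index and subject count are written in a single transaction, and
// any failed step rolls back the whole insert.
//
//	if err := s.bnjAddDM(c, choseSub, forkDM); err != nil {
//		log.Error("bnjAddDM(%+v) error(%v)", forkDM, err)
//	}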
func (s *Service) genDMID(c context.Context) (dmid int64, err error) {
if dmid, err = s.seqRPC.ID(c, s.seqArg); err != nil {
log.Error("seqRPC.ID() error(%v)", err)
return
}
return
}
// bnjLiveDanmu forwards junk (laji) danmu from the live room to the bnj video
// TODO stime
func (s *Service) bnjLiveDanmu(c context.Context, liveDanmu *model.LiveDanmu) (err error) {
var (
cid, dmid int64
progress float64
)
// ignore danmu received before the bnj start time
if time.Since(s.bnjStart) < 0 {
return
}
// limit
if liveDanmu == nil || s.bnjliveRoomID <= 0 || s.bnjliveRoomID != liveDanmu.RoomID || liveDanmu.MsgType != model.LiveDanmuMsgTypeNormal {
return
}
if liveDanmu.UserLevel < s.bnjUserLevel {
return
}
if s.bnjIgnoreRate <= 0 || rand.Int63n(s.bnjIgnoreRate) != 0 {
return
}
if cid, progress, err = s.pickBnjVideo(c, liveDanmu.Time); err != nil {
return
}
// ignore illegal progress
if progress <= 0 {
return
}
if err = s.checkBnjDmMsg(c, liveDanmu.Content); err != nil {
log.Error("bnj bnjLiveDanmu checkBnjDmMsg(liveDanmu:%+v) error(%v)", liveDanmu, err)
return
}
if dmid, err = s.genDMID(c); err != nil {
log.Error("bnj bnjLiveDanmu genDMID() error(%v)", err)
return
}
now := time.Now().Unix()
forkDM := &model.DM{
ID: dmid,
Type: model.SubTypeVideo,
Oid: cid,
Mid: liveDanmu.UID,
Progress: int32(progress * 1000),
Pool: model.PoolNormal,
State: model.StateMonitorAfter,
Ctime: model.ConvertStime(time.Now()),
Mtime: model.ConvertStime(time.Now()),
Content: &model.Content{
ID: dmid,
FontSize: 25,
Color: 16777215,
Mode: model.ModeRolling,
Plat: 0,
Msg: liveDanmu.Content,
Ctime: xtime.Time(now),
Mtime: xtime.Time(now),
},
}
if err = s.bnjCheckFilterService(c, forkDM); err != nil {
log.Error("s.bnjCheckFilterService(%+v) error(%v)", forkDM, err)
return
}
var (
bs []byte
)
if bs, err = json.Marshal(forkDM); err != nil {
log.Error("json.Marshal(%+v) error(%v)", forkDM, err)
return
}
act := &model.Action{
Action: model.ActAddDM,
Data: bs,
}
if err = s.actionAct(c, act); err != nil {
log.Error("s.actionAddDM(%+v) error(%v)", liveDanmu, err)
return
}
return
}
func (s *Service) pickBnjVideo(c context.Context, timestamp int64) (cid int64, progress float64, err error) {
var (
idx int
video *model.Video
)
progress = float64(timestamp - s.bnjStart.Unix())
for idx, video = range s.bnjArcVideos {
if progress > float64(video.Duration) {
progress = progress - float64(video.Duration)
continue
}
// ignore p1 start
if idx != 0 && progress < s.bnjIgnoreBeginTime.Seconds() {
err = ecode.DMProgressTooBig
return
}
if float64(video.Duration)-progress < s.bnjIgnoreEndTime.Seconds() {
err = ecode.DMProgressTooBig
return
}
if progress >= 0 {
progress = progress + float64(rand.Int31n(1000))/1000 // add a random sub-second fraction
}
cid = video.Cid
return
}
err = ecode.DMProgressTooBig
return
}
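// Worked example; the durations and ignore windows are assumptions, not values
// from this file: with two videos of 600s each and a live timestamp 700s after
// s.bnjStart, the first 600s fall on video 1, so the danmu lands on video 2 at
// about progress 100s (plus a small random fraction), provided it is outside
// the configured begin/end ignore windows.
//
//	cid, progress, err := s.pickBnjVideo(c, s.bnjStart.Unix()+700)
//	_ = cid      // second video's cid
//	_ = progress // about 100 seconds
//	_ = err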
func (s *Service) bnjCheckFilterService(c context.Context, dm *model.DM) (err error) {
var (
filterReply *filterMdl.FilterReply
)
if filterReply, err = s.filterRPC.Filter(c, &filterMdl.FilterReq{
Area: "danmu",
Message: dm.Content.Msg,
Id: dm.ID,
Oid: dm.Oid,
Mid: dm.Mid,
}); err != nil {
log.Error("checkFilterService(dm:%+v),err(%v)", dm, err)
return
}
if filterReply.Level > 0 || filterReply.Limit == model.SpamBlack || filterReply.Limit == model.SpamOverflow {
|
dm.State = model.StateFilter
log.Info("bnj filter service delete(dmid:%d,data:+%v)", dm.ID, filterReply)
|
random_line_split
|
|
bnj.go
|
+ 1) * 1000),
Pool: dm.Pool,
State: model.StateAdminDelete,
Ctime: dm.Ctime,
Mtime: dm.Mtime,
Content: &model.Content{
ID: dmid,
FontSize: dm.Content.FontSize,
Color: dm.Content.Color,
Mode: dm.Content.Mode,
IP: dm.Content.IP,
Plat: dm.Content.Plat,
Msg: dm.Content.Msg,
Ctime: dm.Content.Ctime,
Mtime: dm.Content.Mtime,
},
}
if dm.Pool == model.PoolSpecial {
forkDM.ContentSpe = &model.ContentSpecial{
ID: dmid,
Msg: dm.ContentSpe.Msg,
Ctime: dm.ContentSpe.Ctime,
Mtime: dm.ContentSpe.Mtime,
}
}
if err = s.bnjAddDM(c, choseSub, forkDM); err != nil {
return
}
return
}
// bnjAddDM add dm index and content to db by transaction.
func (s *Service) bnjAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
if dm.State != model.StateAdminDelete {
return
}
tx, err := s.dao.BeginTran(c)
if err != nil {
return
}
// special dm
if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
return tx.Rollback()
}
}
if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, 0, sub.Childpool); err != nil {
return tx.Rollback()
}
return tx.Commit()
}
func (s *Service) genDMID(c context.Context) (dmid int64, err error) {
if dmid, err = s.seqRPC.ID(c, s.seqArg); err != nil {
log.Error("seqRPC.ID() error(%v)", err)
return
}
return
}
// bnjLiveDanmu forwards junk (laji) danmu from the live room to the bnj video
// TODO stime
func (s *Service) bnjLiveDanmu(c context.Context, liveDanmu *model.LiveDanmu) (err error) {
var (
cid, dmid int64
progress float64
)
// ignore danmu received before the bnj start time
if time.Since(s.bnjStart) < 0 {
return
}
// limit
if liveDanmu == nil || s.bnjliveRoomID <= 0 || s.bnjliveRoomID != liveDanmu.RoomID || liveDanmu.MsgType != model.LiveDanmuMsgTypeNormal {
return
}
if liveDanmu.UserLevel < s.bnjUserLevel {
return
}
if s.bnjIgnoreRate <= 0 || rand.Int63n(s.bnjIgnoreRate) != 0 {
return
}
if cid, progress, err = s.pickBnjVideo(c, liveDanmu.Time); err != nil {
return
}
// ignore illegal progress
if progress <= 0 {
return
}
if err = s.checkBnjDmMsg(c, liveDanmu.Content); err != nil {
log.Error("bnj bnjLiveDanmu checkBnjDmMsg(liveDanmu:%+v) error(%v)", liveDanmu, err)
return
}
if dmid, err = s.genDMID(c); err != nil {
log.Error("bnj bnjLiveDanmu genDMID() error(%v)", err)
return
}
now := time.Now().Unix()
forkDM := &model.DM{
ID: dmid,
Type: model.SubTypeVideo,
Oid: cid,
Mid: liveDanmu.UID,
Progress: int32(progress * 1000),
Pool: model.PoolNormal,
State: model.StateMonitorAfter,
Ctime: model.ConvertStime(time.Now()),
Mtime: model.ConvertStime(time.Now()),
Content: &model.Content{
ID: dmid,
FontSize: 25,
Color: 16777215,
Mode: model.ModeRolling,
Plat: 0,
Msg: liveDanmu.Content,
Ctime: xtime.Time(now),
Mtime: xtime.Time(now),
},
}
if err = s.bnjCheckFilterService(c, forkDM); err != nil {
log.Error("s.bnjCheckFilterService(%+v) error(%v)", forkDM, err)
return
}
var (
bs []byte
)
if bs, err = json.Marshal(forkDM); err != nil {
log.Error("json.Marshal(%+v) error(%v)", forkDM, err)
return
}
act := &model.Action{
Action: model.ActAddDM,
Data: bs,
}
if err = s.actionAct(c, act); err != nil {
log.Error("s.actionAddDM(%+v) error(%v)", liveDanmu, err)
return
}
return
}
func (s *Service) pickBnjVideo(c context.Context, timestamp int64) (cid int64, progress float64, err error) {
var (
idx int
video *model.Video
)
progress = float64(timestamp - s.bnjStart.Unix())
for idx, video = range s.bnjArcVideos {
if progress > float64(video.Duration) {
progress = progress - float64(video.Duration)
continue
}
// ignore p1 start
if idx != 0 && progress < s.bnjIgnoreBeginTime.Seconds() {
err = ecode.DMProgressTooBig
return
}
if float64(video.Duration)-progress < s.bnjIgnoreEndTime.Seconds() {
err = ecode.DMProgressTooBig
return
}
if progress >= 0 {
progress = progress + float64(rand.Int31n(1000))/1000 // add a random sub-second fraction
}
cid = video.Cid
return
}
err = ecode.DMProgressTooBig
return
}
func (s *Service) bnjCheckFilterService(c context.Context, dm *model.DM) (err error) {
var (
filterReply *filterMdl.FilterReply
)
if filterReply, err = s.filterRPC.Filter(c, &filterMdl.FilterReq{
Area: "danmu",
Message: dm.Content.Msg,
Id: dm.ID,
Oid: dm.Oid,
Mid: dm.Mid,
}); err != nil {
log.Error("checkFilterService(dm:%+v),err(%v)", dm, err)
return
}
if filterReply.Level > 0 || filterReply.Limit == model.SpamBlack || filterReply.Limit == model.SpamOverflow {
dm.State = model.StateFilter
log.Info("bnj filter service delete(dmid:%d,data:+%v)", dm.ID, filterReply)
}
return
}
func (s *Service) checkBnjDmMsg(c context.Context, msg string) (err error) {
var (
msgLen = len([]rune(msg))
)
if msgRegex.MatchString(msg) { // blank (whitespace-only) danmu
err = ecode.DMMsgIlleagel
return
}
if msgLen > _bnjDmMsgLen {
err = ecode.DMMsgTooLong
return
}
if strings.Contains(msg, `\n`) || strings.Contains(msg, `/n`) {
err = ecode.DMMsgIlleagel
return
}
return
}
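// Illustrative behaviour, assuming a context c in scope: whitespace-only
// messages, messages longer than _bnjDmMsgLen runes, and messages containing
// the literal sequences `\n` or `/n` are all rejected.
//
//	err := s.checkBnjDmMsg(c, "   ")            // ecode.DMMsgIlleagel (blank danmu)
//	err = s.checkBnjDmMsg(c, "happy new year")  // nil, assuming it passes the regex and length checks
//	_ = err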
func (s *Service) bnjLiveConfig(c context.Context) (err error) {
var (
bnjConf
|
ig *model.BnjLiveConfig
start time.Time
)
if bnjConfig, err = s.dao.BnjConfig(c); err != nil {
log.Error("bnjLiveConfig error current:%v err:%+v", time.Now().String(), err)
return
}
if bnjConfig == nil {
log.Error("bnjLiveConfig error current:%v bnjConfig nil", time.Now().String())
return
}
if start, err = time.ParseInLocation(_dateFormat, bnjConfig.DanmuDtarTime, time.Now().Location()); err != nil {
log.Error("bnjLiveConfig start time error current:%v config:%+v", time.Now().String(), bnjConfig)
return
}
if bnjConfig.CommentID <= 0 || bnjConfig.RoomID <= 0 {
log.Info("bnjLiveConfig illegal current:%v config:%+v", time.Now().String(), bnjConfig)
return
}
s.bnjAid = bnjConfig.CommentID
|
identifier_body
|
|
bnj.go
|
ID
s.bnjUserLevel = s.conf.BNJ.BnjLiveDanmu.Level
if s.bnjStart, err = time.ParseInLocation(_dateFormat, s.conf.BNJ.BnjLiveDanmu.Start, time.Now().Location()); err != nil {
panic(err)
}
s.bnjCsmr = databus.New(s.conf.Databus.BnjCsmr)
log.Info("bnj init start:%v room_id:%v", s.bnjStart.String(), s.conf.BNJ.BnjLiveDanmu.RoomID)
go s.bnjProc()
}
func (s *Service) bnjProc() {
var (
err error
c = context.Background()
)
for {
msg, ok := <-s.bnjCsmr.Messages()
if !ok {
log.Error("bnj bnjProc consumer exit")
return
}
log.Info("bnj partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
m := &model.LiveDanmu{}
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
continue
}
if err = s.bnjLiveDanmu(c, m); err != nil {
log.Error("bnj bnjLiveDanmu(msg:%+v),error(%v)", m, err)
continue
}
if err = msg.Commit(); err != nil {
log.Error("commit offset(%v) error(%v)", msg, err)
}
}
}
func (s *Service) bnjVideos(c context.Context) (err error) {
var (
videos []*model.Video
)
if videos, err = s.dao.Videos(c, s.bnjAid); err != nil {
log.Error("bnj bnjVideos(aid:%v) error(%v)", s.bnjAid, err)
return
}
if len(videos) >= 4 {
videos = videos[:4]
}
for _, video := range videos {
if err = s.syncBnjVideo(c, model.SubTypeVideo, video); err != nil {
log.Error("bnj syncBnjVideo(video:%+v) error(%v)", video, err)
return
}
}
s.bnjArcVideos = videos
return
}
func (s *Service) syncBnjVid
|
.Context, tp int32, v *model.Video) (err error) {
sub, err := s.dao.Subject(c, tp, v.Cid)
if err != nil {
return
}
if sub == nil {
if v.XCodeState >= model.VideoXcodeHDFinish {
if _, err = s.dao.AddSubject(c, tp, v.Cid, v.Aid, v.Mid, s.maxlimit(v.Duration), 0); err != nil {
return
}
}
} else {
if sub.Mid != v.Mid {
if _, err = s.dao.UpdateSubMid(c, tp, v.Cid, v.Mid); err != nil {
return
}
}
}
return
}
// bnjDmCount adds a hidden junk (laji) danmu to bump the bnj danmu count
func (s *Service) bnjDmCount(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
var (
dmid int64
pages []*api.Page
chosen *api.Page
choseSub *model.Subject
)
if _, ok := s.bnjSubAids[sub.Pid]; !ok {
return
}
if pages, err = s.arcRPC.Page3(c, &arcMdl.ArgAid2{
Aid: s.bnjAid,
RealIP: metadata.String(c, metadata.RemoteIP),
}); err != nil {
log.Error("bnjDmCount Page3(aid:%v) error(%v)", sub.Pid, err)
return
}
if len(pages) <= 0 {
return
}
idx := time.Now().Unix() % int64(len(pages))
if chosen = pages[idx]; chosen == nil {
return
}
if choseSub, err = s.subject(c, model.SubTypeVideo, chosen.Cid); err != nil {
return
}
if dmid, err = s.genDMID(c); err != nil {
log.Error("bnjDmCount genDMID() error(%v)", err)
return
}
forkDM := &model.DM{
ID: dmid,
Type: model.SubTypeVideo,
Oid: chosen.Cid,
Mid: dm.Mid,
Progress: int32((chosen.Duration + 1) * 1000),
Pool: dm.Pool,
State: model.StateAdminDelete,
Ctime: dm.Ctime,
Mtime: dm.Mtime,
Content: &model.Content{
ID: dmid,
FontSize: dm.Content.FontSize,
Color: dm.Content.Color,
Mode: dm.Content.Mode,
IP: dm.Content.IP,
Plat: dm.Content.Plat,
Msg: dm.Content.Msg,
Ctime: dm.Content.Ctime,
Mtime: dm.Content.Mtime,
},
}
if dm.Pool == model.PoolSpecial {
forkDM.ContentSpe = &model.ContentSpecial{
ID: dmid,
Msg: dm.ContentSpe.Msg,
Ctime: dm.ContentSpe.Ctime,
Mtime: dm.ContentSpe.Mtime,
}
}
if err = s.bnjAddDM(c, choseSub, forkDM); err != nil {
return
}
return
}
// bnjAddDM add dm index and content to db by transaction.
func (s *Service) bnjAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
if dm.State != model.StateAdminDelete {
return
}
tx, err := s.dao.BeginTran(c)
if err != nil {
return
}
// special dm
if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
return tx.Rollback()
}
}
if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, 0, sub.Childpool); err != nil {
return tx.Rollback()
}
return tx.Commit()
}
func (s *Service) genDMID(c context.Context) (dmid int64, err error) {
if dmid, err = s.seqRPC.ID(c, s.seqArg); err != nil {
log.Error("seqRPC.ID() error(%v)", err)
return
}
return
}
// bnjLiveDanmu forwards junk (laji) danmu from the live room to the bnj video
// TODO stime
func (s *Service) bnjLiveDanmu(c context.Context, liveDanmu *model.LiveDanmu) (err error) {
var (
cid, dmid int64
progress float64
)
// ignore danmu received before the bnj start time
if time.Since(s.bnjStart) < 0 {
return
}
// limit
if liveDanmu == nil || s.bnjliveRoomID <= 0 || s.bnjliveRoomID != liveDanmu.RoomID || liveDanmu.MsgType != model.LiveDanmuMsgTypeNormal {
return
}
if liveDanmu.UserLevel < s.bnjUserLevel {
return
}
if s.bnjIgnoreRate <= 0 || rand.Int63n(s.bnjIgnoreRate) != 0 {
return
}
if cid, progress, err = s.pickBnjVideo(c, liveDanmu.Time); err != nil {
return
}
// ignore illegal progress
if progress <= 0 {
return
}
if err = s.checkBnjDmMsg(c, liveDanmu.Content); err != nil {
log.Error("bnj bnjLiveDanmu checkBnjDmMsg(liveDanmu:%+v) error(%v)", liveDanmu, err)
return
}
if dmid, err = s.genDMID(c); err != nil {
log.Error("bnj bnjLiveDanmu genDMID() error(%v)", err)
return
}
now := time.Now().Unix()
forkDM := &model.DM{
ID: dmid,
Type: model.SubTypeVideo,
Oid: cid,
Mid: liveDanmu.UID,
Progress: int32(progress * 1000),
Pool: model.PoolNormal,
State: model.StateMonitorAfter,
Ctime: model.ConvertStime(time.Now()),
Mtime: model.ConvertStime(time.Now()),
Content: &model.Content{
ID: dmid,
FontSize: 25,
Color: 1677
|
eo(c context
|
identifier_name
|
bnj.go
|
ID
s.bnjUserLevel = s.conf.BNJ.BnjLiveDanmu.Level
if s.bnjStart, err = time.ParseInLocation(_dateFormat, s.conf.BNJ.BnjLiveDanmu.Start, time.Now().Location()); err != nil {
panic(err)
}
s.bnjCsmr = databus.New(s.conf.Databus.BnjCsmr)
log.Info("bnj init start:%v room_id:%v", s.bnjStart.String(), s.conf.BNJ.BnjLiveDanmu.RoomID)
go s.bnjProc()
}
func (s *Service) bnjProc() {
var (
err error
c = context.Background()
)
for {
msg, ok := <-s.bnjCsmr.Messages()
if !ok {
log.Error("bnj bnjProc consumer exit")
return
}
log.Info("bnj partition:%d,offset:%d,key:%s,value:%s", msg.Partition, msg.Offset, msg.Key, msg.Value)
m := &model.LiveDanmu{}
if err = json.Unmarshal(msg.Value, m); err != nil {
log.Error("json.Unmarshal(%v) error(%v)", string(msg.Value), err)
continue
}
if err = s.bnjLiveDanmu(c, m); err != nil {
log.Error("bnj bnjLiveDanmu(msg:%+v),error(%v)", m, err)
continue
}
if err = msg.Commit(); err != nil {
log.Error("commit offset(%v) error(%v)", msg, err)
}
}
}
func (s *Service) bnjVideos(c context.Context) (err error) {
var (
videos []*model.Video
)
if videos, err = s.dao.Videos(c, s.bnjAid); err != nil {
log.Error("bnj bnjVideos(aid:%v) error(%v)", s.bnjAid, err)
return
}
if len(videos) >= 4 {
videos = videos[:4]
}
for _, video := range videos {
if err = s.syncBnjVideo(c, model.SubTypeVideo, video); err != nil {
log.Error("bnj syncBnjVideo(video:%+v) error(%v)", video, err)
return
}
}
s.bnjArcVideos = videos
return
}
func (s *Service) syncBnjVideo(c context.Context, tp int32, v *model.Video) (err error) {
sub, err := s.dao.Subject(c, tp, v.Cid)
if err != nil {
return
}
if sub == nil {
if v.XCodeState >= model.VideoXcodeHDFinish {
if _, err = s.dao.AddSubject(c, tp, v.Cid, v.Aid, v.Mid, s.maxlimit(v.Duration), 0); err != nil {
return
}
}
} else {
if sub.Mid != v.Mid {
if _, err = s.dao.UpdateSubMid(c, tp, v.Cid, v.Mid); err != nil {
return
}
}
}
return
}
// bnjDmCount rough bnj danmu count
func (s *Service) bnjDmCount(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
var (
dmid int64
pages []*api.Page
chosen *api.Page
choseSub *model.Subject
)
if _, ok := s.bnjSubAids[sub.Pid]; !ok {
return
}
if pages, err = s.arcRPC.Page3(c, &arcMdl.ArgAid2{
Aid: s.bnjAid,
RealIP: metadata.String(c, metadata.RemoteIP),
}); err != nil {
log.Error("bnjDmCount Page3(aid:%v) error(%v)", sub.Pid, err)
return
}
if len(pages) <= 0 {
return
}
idx := time.Now().Unix() % int64(len(pages))
if chosen = pages[idx]; chosen == nil {
return
}
if choseSub, err = s.subject(c, model.SubTypeVideo, chosen.Cid); err != nil {
return
}
if dmid, err = s.genDMID(c); err != nil {
log.Er
|
= &model.DM{
ID: dmid,
Type: model.SubTypeVideo,
Oid: chosen.Cid,
Mid: dm.Mid,
Progress: int32((chosen.Duration + 1) * 1000),
Pool: dm.Pool,
State: model.StateAdminDelete,
Ctime: dm.Ctime,
Mtime: dm.Mtime,
Content: &model.Content{
ID: dmid,
FontSize: dm.Content.FontSize,
Color: dm.Content.Color,
Mode: dm.Content.Mode,
IP: dm.Content.IP,
Plat: dm.Content.Plat,
Msg: dm.Content.Msg,
Ctime: dm.Content.Ctime,
Mtime: dm.Content.Mtime,
},
}
if dm.Pool == model.PoolSpecial {
forkDM.ContentSpe = &model.ContentSpecial{
ID: dmid,
Msg: dm.ContentSpe.Msg,
Ctime: dm.ContentSpe.Ctime,
Mtime: dm.ContentSpe.Mtime,
}
}
if err = s.bnjAddDM(c, choseSub, forkDM); err != nil {
return
}
return
}
// bnjAddDM add dm index and content to db by transaction.
func (s *Service) bnjAddDM(c context.Context, sub *model.Subject, dm *model.DM) (err error) {
if dm.State != model.StateAdminDelete {
return
}
tx, err := s.dao.BeginTran(c)
if err != nil {
return
}
// special dm
if dm.Pool == model.PoolSpecial && dm.ContentSpe != nil {
if _, err = s.dao.TxAddContentSpecial(tx, dm.ContentSpe); err != nil {
return tx.Rollback()
}
}
if _, err = s.dao.TxAddContent(tx, dm.Oid, dm.Content); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxAddIndex(tx, dm); err != nil {
return tx.Rollback()
}
if _, err = s.dao.TxIncrSubjectCount(tx, sub.Type, sub.Oid, 1, 0, sub.Childpool); err != nil {
return tx.Rollback()
}
return tx.Commit()
}
func (s *Service) genDMID(c context.Context) (dmid int64, err error) {
if dmid, err = s.seqRPC.ID(c, s.seqArg); err != nil {
log.Error("seqRPC.ID() error(%v)", err)
return
}
return
}
// bnjLiveDanmu converts live danmu into video danmu (rough implementation)
// TODO stime
func (s *Service) bnjLiveDanmu(c context.Context, liveDanmu *model.LiveDanmu) (err error) {
var (
cid, dmid int64
progress float64
)
// ignore time before
if time.Since(s.bnjStart) < 0 {
return
}
// limit
if liveDanmu == nil || s.bnjliveRoomID <= 0 || s.bnjliveRoomID != liveDanmu.RoomID || liveDanmu.MsgType != model.LiveDanmuMsgTypeNormal {
return
}
if liveDanmu.UserLevel < s.bnjUserLevel {
return
}
if s.bnjIgnoreRate <= 0 || rand.Int63n(s.bnjIgnoreRate) != 0 {
return
}
if cid, progress, err = s.pickBnjVideo(c, liveDanmu.Time); err != nil {
return
}
// ignore illegal progress
if progress <= 0 {
return
}
if err = s.checkBnjDmMsg(c, liveDanmu.Content); err != nil {
log.Error("bnj bnjLiveDanmu checkBnjDmMsg(liveDanmu:%+v) error(%v)", liveDanmu, err)
return
}
if dmid, err = s.genDMID(c); err != nil {
log.Error("bnj bnjLiveDanmu genDMID() error(%v)", err)
return
}
now := time.Now().Unix()
forkDM := &model.DM{
ID: dmid,
Type: model.SubTypeVideo,
Oid: cid,
Mid: liveDanmu.UID,
Progress: int32(progress * 1000),
Pool: model.PoolNormal,
State: model.StateMonitorAfter,
Ctime: model.ConvertStime(time.Now()),
Mtime: model.ConvertStime(time.Now()),
Content: &model.Content{
ID: dmid,
FontSize: 25,
Color: 167
|
ror("bnjDmCount genDMID() error(%v)", err)
return
}
forkDM :
|
conditional_block
|
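The bnjAddDM function in the bnj.go rows above wraps the content insert, the index insert, and the subject-count bump in one transaction and rolls back on the first failure. A minimal Python sketch of that same shape follows; sqlite3 and the table names are stand-ins chosen only for illustration, not anything used by the service above.

import sqlite3

# Illustrative only: mirrors the begin / rollback-on-first-error / commit shape
# of bnjAddDM. Assumes dm_content / dm_index / dm_subject tables already exist;
# all names here are invented for this sketch.
def add_dm(conn: sqlite3.Connection, oid: int, dmid: int, msg: str) -> None:
    try:
        # sqlite3 opens a transaction implicitly before the first write.
        conn.execute("INSERT INTO dm_content (dmid, msg) VALUES (?, ?)", (dmid, msg))
        conn.execute("INSERT INTO dm_index (dmid, oid) VALUES (?, ?)", (dmid, oid))
        conn.execute("UPDATE dm_subject SET count = count + 1 WHERE oid = ?", (oid,))
    except sqlite3.Error:
        conn.rollback()  # undo all three statements, like the tx.Rollback() calls above
        raise
    conn.commit()        # counterpart of tx.Commit()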
raw.py
|
Type]
unlocked: bool
unlockedTime: NumberType
getHeapStatistics: Optional[Callable[[], HeapStatType]]
getUsed: Callable[[], NumberType]
halt: Optional[Callable[[], None]]
setShardLimits: Callable[[Dict[str, NumberType]], NumberType]
unlock: Callable[[], NumberType]
generatePixel: Callable[[], NumberType]
class GLType(TypedDict):
level: NumberType
progress: NumberType
progressTotal: NumberType
class ShardType(TypedDict):
name: str
type: str
ptr: bool
class MultiRoomRouteOpts(TypedDict):
routeCallback: Callable[[str, str], NumberType]
class MultiRoomRouteOutput(TypedDict):
exit: NumberType
room: str
class RoomStatus(TypedDict):
status: str
timestamp: NumberType
class LineStyle(TypedDict):
width: Optional[NumberType]
color: Optional[str]
opacity: Optional[NumberType]
lineStyle: Optional[str]
class ShapeStyle(TypedDict):
fill: Optional[str]
opacity: Optional[NumberType]
stroke: Optional[str]
strokeWidth: Optional[NumberType]
lineStyle: Optional[str]
class CircleStyle(ShapeStyle):
radius: Optional[NumberType]
class TextStyle(TypedDict):
color: Optional[str]
fontFamily: Optional[str]
fontSize: Optional[NumberType]
fontStyle: Optional[str]
fontVariant: Optional[str]
stroke: Optional[str]
strokeWidth: Optional[NumberType]
backgroundColor: Optional[str]
backgroundPadding: Optional[NumberType]
align: Optional[str]
opacity: Optional[NumberType]
class MapVisual():
line: Callable[[RoomPos, RoomPos, Optional[LineStyle]], MapVisual]
circle: Callable[[RoomPos, Optional[CircleStyle]], MapVisual]
rect: Callable[[RoomPos, NumberType, NumberType, Optional[ShapeStyle]], MapVisual]
poly: Callable[[List[RoomPos], Optional[ShapeStyle]], MapVisual]
text: Callable[[str, RoomPos, Optional[TextStyle]], MapVisual]
clear: Callable[[], MapVisual]
getSize: Callable[[], NumberType]
toString: Callable[[], str]
fromString: Callable[[str], MapVisual]
class Map():
describeExits: Callable[[str], Dict[str, str]]
findExit: Callable[[*[StrOrRoom] * 2, Optional[RouteOpts]], Union[NumberType, NumberType]]
findRoute: Callable[[*[StrOrRoom] * 2, Optional[MultiRoomRouteOpts]], Union[List[MultiRoomRouteOutput], NumberType]]
getRoomLinearDistance: Callable[[str, str, Optional[bool]], NumberType]
getRoomTerrain: Callable[[str], RoomTerrain]
getWorldSize: Callable[[], NumberType]
getRoomStatus: Callable[[str], RoomStatus]
visual: MapVisual
class TransactionPlayer(TypedDict):
username: str
class TransactionOrder(TypedDict):
id: str
type: str
price: NumberType
class Transaction(TypedDict):
transactionId: str
time: NumberType
sender: TransactionPlayer
recipient: TransactionPlayer
resourceType: str
amount: NumberType
to: str
description: str
order: TransactionOrder
Transaction.__annotations__.setdefault("from", str)
class Order(TypedDict):
id: str
created: NumberType
active: bool
type: str
resourceType: str
roomName: str
amount: NumberType
remainingAmount: NumberType
totalAmount: NumberType
price: NumberType
class OrderParams(TypedDict):
type: str
resourceType: str
price: NumberType
totalAmount: NumberType
roomName: Optional[str]
LodashFilter = Callable[[Any, Union[str, NumberType], Iterable], bool]
class OrderData(TypedDict):
id: str
created: NumberType
createdTimestamp: Optional[NumberType]
type: str
resourceType: str
roomName: str
amount: NumberType
remainingAmount: NumberType
price: NumberType
class ResourcePriceHistory(TypedDict):
resourceType: str
date: str
transactions: NumberType
volume: NumberType
avgPrice: NumberType
stddevPrice: NumberType
class Market():
credits: NumberType
incomingTransactions: List[Transaction]
outgoingTransactions: List[Transaction]
orders: Dict[str, Order]
calcTransactionCost: Callable[[NumberType, str, str], NumberType]
cancelOrder: Callable[[str], NumberType]
changeOrderPrice: Callable[[str, NumberType], NumberType]
createOrder: Callable[[OrderParams], NumberType]
deal: Callable[[str, NumberType, Optional[str]], NumberType]
extendOrder: Callable[[str, NumberType], NumberType]
getAllOrders: Callable[[Optional[LodashFilter]], List[OrderData]]
getHistory: Callable[[str], List[ResourcePriceHistory]]
getOrderById: Callable[[str], OrderData]
class Game():
|
class InterShardMemory():
getLocal: Callable[[], str]
setLocal: Callable[[str], None]
getRemote: Callable[[str], str]
class PathFinderOpts(TypedDict):
roomCallback: Optional[Callable[[str], Union[CostMatrix, bool]]]
plainCost: Optional[NumberType]
swampCost: Optional[NumberType]
flee: Optional[bool]
maxOps: Optional[NumberType]
maxRooms: Optional[NumberType]
maxCost: Optional[NumberType]
heuristicWeight: Optional[NumberType]
class Path(TypedDict):
path: List[RoomPos]
ops: NumberType
cost: NumberType
incomplete: bool
class PathFinder():
search: Callable[[RoomPos, Union[Goal, List[Goal]], Optional[PathFinderOpts]], Path]
class Effect(TypedDict):
effect: NumberType
level: Optional[NumberType]
ticksRemaining: NumberType
class RoomObject():
effects: List[Effect]
pos: RoomPos
room: Union[Room, None]
class OwnerDict(TypedDict):
username: str
class ConstructionSite(RoomObject):
id: str
my: bool
owner: OwnerDict
progress: NumberType
progressTotal: NumberType
structureType: str
remove: Callable[[], NumberType]
class CreepBodyPart(TypedDict):
boost: Union[str, None]
type: str
hits: NumberType
class DropResourceData(TypedDict):
resourceType: str
amount: NumberType
class CreepMoveToOpts(RoomFindPathOpts):
reusePath: Optional[NumberType]
serializeMemory: Optional[bool]
noPathFinding: Optional[bool]
visualizePathStyle: Optional[RoomVisualPolyStyle]
Pos = Union[RoomObject, RoomPos]
class BaseCreep(RoomObject):
hits: NumberType
hitsMax: NumberType
id: str
memory: Any
my: bool
name: str
owner: OwnerDict
saying: str
store: Store
ticksToLive: NumberType
cancelOrder: Callable[[str], NumberType]
drop: Callable[[DropResourceData], NumberType]
move: Callable[[Union[Creep, NumberType]], NumberType]
moveByPath: Callable[[Union[list, str]], NumberType]
moveTo: Union[
Callable[[NumberType, NumberType, Optional[CreepMoveToOpts]], NumberType],
Callable[[Pos, Optional[CreepMoveToOpts]], NumberType]]
notifyWhenAttacked: Callable[[bool], NumberType]
pickup: Callable[[Resource], NumberType]
say: Callable[[str, Optional[bool]], NumberType]
suicide: Callable[[], NumberType]
transfer: Callable[[Union[Creep, PowerCreep, Structure], str, Optional[NumberType]], NumberType]
withdraw: Callable[[Union[Structure, Tombstone, Ruin], str, Optional[NumberType]], NumberType]
class Creep(BaseCreep):
body: List[CreepBodyPart]
fatigue: NumberType
spawning: bool
attack: Callable[[Union[Creep, PowerCreep, Structure]], NumberType]
attackController: Callable[[StructureController], NumberType]
build: Callable[[ConstructionSite], NumberType]
claimController: Callable[[StructureController], NumberType]
dismantle: Callable[[Structure], NumberType]
generateSafeMode: Callable[[StructureController], NumberType]
getActiveBodyparts: Callable[[str], NumberType]
harvest: Callable[[Union[Source, Mineral, Deposit]], NumberType]
heal: Callable[[Union[Creep, PowerCreep]], NumberType]
pull: Callable[[Creep], NumberType]
rangedAttack: Callable[[Union[Creep, PowerCreep, Structure]], NumberType]
rangedHeal: Callable[[Union[Creep, PowerCreep]], NumberType
|
constructionSites: Dict[str, ConstructionSite]
cpu: CPUType
creeps: Dict[str, Creep]
flags: Dict[str, Flag]
gcl: GLType
gpl: GLType
map: Map
market: Market
powerCreeps: Dict[str, PowerCreep]
resources: ResourcesType
rooms: Dict[str, Room]
shard: ShardType
spawns: Dict[str, StructureSpawn]
structures: Dict[str, Structure]
time: NumberType
getObjectById: Callable[[str], Union[GameObject, None]]
notify: Callable[[str, Optional[NumberType]], None]
|
identifier_body
|
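The raw.py rows above patch Transaction with Transaction.__annotations__.setdefault("from", str) because "from" is a Python keyword and cannot be written as a field in class-based TypedDict syntax. A small sketch of the equivalent functional form (field set abbreviated, names illustrative):

from typing import TypedDict

# The functional TypedDict syntax accepts keys that are Python keywords, so the
# __annotations__ patch seen above is not needed in this form.
TransactionAlt = TypedDict("TransactionAlt", {
    "transactionId": str,
    "from": str,      # legal as a dict key, not as a class attribute
    "to": str,
    "amount": float,
})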
raw.py
|
Type]
unlocked: bool
unlockedTime: NumberType
getHeapStatistics: Optional[Callable[[], HeapStatType]]
getUsed: Callable[[], NumberType]
halt: Optional[Callable[[], None]]
setShardLimits: Callable[[Dict[str, NumberType]], NumberType]
unlock: Callable[[], NumberType]
generatePixel: Callable[[], NumberType]
class GLType(TypedDict):
|
class ShardType(TypedDict):
name: str
type: str
ptr: bool
class MultiRoomRouteOpts(TypedDict):
routeCallback: Callable[[str, str], NumberType]
class MultiRoomRouteOutput(TypedDict):
exit: NumberType
room: str
class RoomStatus(TypedDict):
status: str
timestamp: NumberType
class LineStyle(TypedDict):
width: Optional[NumberType]
color: Optional[str]
opacity: Optional[NumberType]
lineStyle: Optional[str]
class ShapeStyle(TypedDict):
fill: Optional[str]
opacity: Optional[NumberType]
stroke: Optional[str]
strokeWidth: Optional[NumberType]
lineStyle: Optional[str]
class CircleStyle(ShapeStyle):
radius: Optional[NumberType]
class TextStyle(TypedDict):
color: Optional[str]
fontFamily: Optional[str]
fontSize: Optional[NumberType]
fontStyle: Optional[str]
fontVariant: Optional[str]
stroke: Optional[str]
strokeWidth: Optional[NumberType]
backgroundColor: Optional[str]
backgroundPadding: Optional[NumberType]
align: Optional[str]
opacity: Optional[NumberType]
class MapVisual():
line: Callable[[RoomPos, RoomPos, Optional[LineStyle]], MapVisual]
circle: Callable[[RoomPos, Optional[CircleStyle]], MapVisual]
rect: Callable[[RoomPos, NumberType, NumberType, Optional[ShapeStyle]], MapVisual]
poly: Callable[[List[RoomPos], Optional[ShapeStyle]], MapVisual]
text: Callable[[str, RoomPos, Optional[TextStyle]], MapVisual]
clear: Callable[[], MapVisual]
getSize: Callable[[], NumberType]
toString: Callable[[], str]
fromString: Callable[[str], MapVisual]
class Map():
describeExits: Callable[[str], Dict[str, str]]
findExit: Callable[[*[StrOrRoom] * 2, Optional[RouteOpts]], Union[NumberType, NumberType]]
findRoute: Callable[[*[StrOrRoom] * 2, Optional[MultiRoomRouteOpts]], Union[List[MultiRoomRouteOutput], NumberType]]
getRoomLinearDistance: Callable[[str, str, Optional[bool]], NumberType]
getRoomTerrain: Callable[[str], RoomTerrain]
getWorldSize: Callable[[], NumberType]
getRoomStatus: Callable[[str], RoomStatus]
visual: MapVisual
class TransactionPlayer(TypedDict):
username: str
class TransactionOrder(TypedDict):
id: str
type: str
price: NumberType
class Transaction(TypedDict):
transactionId: str
time: NumberType
sender: TransactionPlayer
recipient: TransactionPlayer
resourceType: str
amount: NumberType
to: str
description: str
order: TransactionOrder
Transaction.__annotations__.setdefault("from", str)
class Order(TypedDict):
id: str
created: NumberType
active: bool
type: str
resourceType: str
roomName: str
amount: NumberType
remainingAmount: NumberType
totalAmount: NumberType
price: NumberType
class OrderParams(TypedDict):
type: str
resourceType: str
price: NumberType
totalAmount: NumberType
roomName: Optional[str]
LodashFilter = Callable[[Any, Union[str, NumberType], Iterable], bool]
class OrderData(TypedDict):
id: str
created: NumberType
createdTimestamp: Optional[NumberType]
type: str
resourceType: str
roomName: str
amount: NumberType
remainingAmount: NumberType
price: NumberType
class ResourcePriceHistory(TypedDict):
resourceType: str
date: str
transactions: NumberType
volume: NumberType
avgPrice: NumberType
stddevPrice: NumberType
class Market():
credits: NumberType
incomingTransactions: List[Transaction]
outgoingTransactions: List[Transaction]
orders: Dict[str, Order]
calcTransactionCost: Callable[[NumberType, str, str], NumberType]
cancelOrder: Callable[[str], NumberType]
changeOrderPrice: Callable[[str, NumberType], NumberType]
createOrder: Callable[[OrderParams], NumberType]
deal: Callable[[str, NumberType, Optional[str]], NumberType]
extendOrder: Callable[[str, NumberType], NumberType]
getAllOrders: Callable[[Optional[LodashFilter]], List[OrderData]]
getHistory: Callable[[str], List[ResourcePriceHistory]]
getOrderById: Callable[[str], OrderData]
class Game():
constructionSites: Dict[str, ConstructionSite]
cpu: CPUType
creeps: Dict[str, Creep]
flags: Dict[str, Flag]
gcl: GLType
gpl: GLType
map: Map
market: Market
powerCreeps: Dict[str, PowerCreep]
resources: ResourcesType
rooms: Dict[str, Room]
shard: ShardType
spawns: Dict[str, StructureSpawn]
structures: Dict[str, Structure]
time: NumberType
getObjectById: Callable[[str], Union[GameObject, None]]
notify: Callable[[str, Optional[NumberType]], None]
class InterShardMemory():
getLocal: Callable[[], str]
setLocal: Callable[[str], None]
getRemote: Callable[[str], str]
class PathFinderOpts(TypedDict):
roomCallback: Optional[Callable[[str], Union[CostMatrix, bool]]]
plainCost: Optional[NumberType]
swampCost: Optional[NumberType]
flee: Optional[bool]
maxOps: Optional[NumberType]
maxRooms: Optional[NumberType]
maxCost: Optional[NumberType]
heuristicWeight: Optional[NumberType]
class Path(TypedDict):
path: List[RoomPos]
ops: NumberType
cost: NumberType
incomplete: bool
class PathFinder():
search: Callable[[RoomPos, Union[Goal, List[Goal]], Optional[PathFinderOpts]], Path]
class Effect(TypedDict):
effect: NumberType
level: Optional[NumberType]
ticksRemaining: NumberType
class RoomObject():
effects: List[Effect]
pos: RoomPos
room: Union[Room, None]
class OwnerDict(TypedDict):
username: str
class ConstructionSite(RoomObject):
id: str
my: bool
owner: OwnerDict
progress: NumberType
progressTotal: NumberType
structureType: str
remove: Callable[[], NumberType]
class CreepBodyPart(TypedDict):
boost: Union[str, None]
type: str
hits: NumberType
class DropResourceData(TypedDict):
resourceType: str
amount: NumberType
class CreepMoveToOpts(RoomFindPathOpts):
reusePath: Optional[NumberType]
serializeMemory: Optional[bool]
noPathFinding: Optional[bool]
visualizePathStyle: Optional[RoomVisualPolyStyle]
Pos = Union[RoomObject, RoomPos]
class BaseCreep(RoomObject):
hits: NumberType
hitsMax: NumberType
id: str
memory: Any
my: bool
name: str
owner: OwnerDict
saying: str
store: Store
ticksToLive: NumberType
cancelOrder: Callable[[str], NumberType]
drop: Callable[[DropResourceData], NumberType]
move: Callable[[Union[Creep, NumberType]], NumberType]
moveByPath: Callable[[Union[list, str]], NumberType]
moveTo: Union[
Callable[[NumberType, NumberType, Optional[CreepMoveToOpts]], NumberType],
Callable[[Pos, Optional[CreepMoveToOpts]], NumberType]]
notifyWhenAttacked: Callable[[bool], NumberType]
pickup: Callable[[Resource], NumberType]
say: Callable[[str, Optional[bool]], NumberType]
suicide: Callable[[], NumberType]
transfer: Callable[[Union[Creep, PowerCreep, Structure], str, Optional[NumberType]], NumberType]
withdraw: Callable[[Union[Structure, Tombstone, Ruin], str, Optional[NumberType]], NumberType]
class Creep(BaseCreep):
body: List[CreepBodyPart]
fatigue: NumberType
spawning: bool
attack: Callable[[Union[Creep, PowerCreep, Structure]], NumberType]
attackController: Callable[[StructureController], NumberType]
build: Callable[[ConstructionSite], NumberType]
claimController: Callable[[StructureController], NumberType]
dismantle: Callable[[Structure], NumberType]
generateSafeMode: Callable[[StructureController], NumberType]
getActiveBodyparts: Callable[[str], NumberType]
harvest: Callable[[Union[Source, Mineral, Deposit]], NumberType]
heal: Callable[[Union[Creep, PowerCreep]], NumberType]
pull: Callable[[Creep], NumberType]
rangedAttack: Callable[[Union[Creep, PowerCreep, Structure]], NumberType]
rangedHeal: Callable[[Union[Creep, PowerCreep]], NumberType]
|
level: NumberType
progress: NumberType
progressTotal: NumberType
|
random_line_split
|
raw.py
|
Type]
unlocked: bool
unlockedTime: NumberType
getHeapStatistics: Optional[Callable[[], HeapStatType]]
getUsed: Callable[[], NumberType]
halt: Optional[Callable[[], None]]
setShardLimits: Callable[[Dict[str, NumberType]], NumberType]
unlock: Callable[[], NumberType]
generatePixel: Callable[[], NumberType]
class GLType(TypedDict):
level: NumberType
progress: NumberType
progressTotal: NumberType
class ShardType(TypedDict):
name: str
type: str
ptr: bool
class MultiRoomRouteOpts(TypedDict):
routeCallback: Callable[[str, str], NumberType]
class MultiRoomRouteOutput(TypedDict):
exit: NumberType
room: str
class RoomStatus(TypedDict):
status: str
timestamp: NumberType
class LineStyle(TypedDict):
width: Optional[NumberType]
color: Optional[str]
opacity: Optional[NumberType]
lineStyle: Optional[str]
class ShapeStyle(TypedDict):
fill: Optional[str]
opacity: Optional[NumberType]
stroke: Optional[str]
strokeWidth: Optional[NumberType]
lineStyle: Optional[str]
class CircleStyle(ShapeStyle):
radius: Optional[NumberType]
class TextStyle(TypedDict):
color: Optional[str]
fontFamily: Optional[str]
fontSize: Optional[NumberType]
fontStyle: Optional[str]
fontVariant: Optional[str]
stroke: Optional[str]
strokeWidth: Optional[NumberType]
backgroundColor: Optional[str]
backgroundPadding: Optional[NumberType]
align: Optional[str]
opacity: Optional[NumberType]
class MapVisual():
line: Callable[[RoomPos, RoomPos, Optional[LineStyle]], MapVisual]
circle: Callable[[RoomPos, Optional[CircleStyle]], MapVisual]
rect: Callable[[RoomPos, NumberType, NumberType, Optional[ShapeStyle]], MapVisual]
poly: Callable[[List[RoomPos], Optional[ShapeStyle]], MapVisual]
text: Callable[[str, RoomPos, Optional[TextStyle]], MapVisual]
clear: Callable[[], MapVisual]
getSize: Callable[[], NumberType]
toString: Callable[[], str]
fromString: Callable[[str], MapVisual]
class Map():
describeExits: Callable[[str], Dict[str, str]]
findExit: Callable[[*[StrOrRoom] * 2, Optional[RouteOpts]], Union[NumberType, NumberType]]
findRoute: Callable[[*[StrOrRoom] * 2, Optional[MultiRoomRouteOpts]], Union[List[MultiRoomRouteOutput], NumberType]]
getRoomLinearDistance: Callable[[str, str, Optional[bool]], NumberType]
getRoomTerrain: Callable[[str], RoomTerrain]
getWorldSize: Callable[[], NumberType]
getRoomStatus: Callable[[str], RoomStatus]
visual: MapVisual
class TransactionPlayer(TypedDict):
username: str
class TransactionOrder(TypedDict):
id: str
type: str
price: NumberType
class Transaction(TypedDict):
transactionId: str
time: NumberType
sender: TransactionPlayer
recipient: TransactionPlayer
resourceType: str
amount: NumberType
to: str
description: str
order: TransactionOrder
Transaction.__annotations__.setdefault("from", str)
class Order(TypedDict):
id: str
created: NumberType
active: bool
type: str
resourceType: str
roomName: str
amount: NumberType
remainingAmount: NumberType
totalAmount: NumberType
price: NumberType
class OrderParams(TypedDict):
type: str
resourceType: str
price: NumberType
totalAmount: NumberType
roomName: Optional[str]
LodashFilter = Callable[[Any, Union[str, NumberType], Iterable], bool]
class OrderData(TypedDict):
id: str
created: NumberType
createdTimestamp: Optional[NumberType]
type: str
resourceType: str
roomName: str
amount: NumberType
remainingAmount: NumberType
price: NumberType
class ResourcePriceHistory(TypedDict):
resourceType: str
date: str
transactions: NumberType
volume: NumberType
avgPrice: NumberType
stddevPrice: NumberType
class Market():
credits: NumberType
incomingTransactions: List[Transaction]
outgoingTransactions: List[Transaction]
orders: Dict[str, Order]
calcTransactionCost: Callable[[NumberType, str, str], NumberType]
cancelOrder: Callable[[str], NumberType]
changeOrderPrice: Callable[[str, NumberType], NumberType]
createOrder: Callable[[OrderParams], NumberType]
deal: Callable[[str, NumberType, Optional[str]], NumberType]
extendOrder: Callable[[str, NumberType], NumberType]
getAllOrders: Callable[[Optional[LodashFilter]], List[OrderData]]
getHistory: Callable[[str], List[ResourcePriceHistory]]
getOrderById: Callable[[str], OrderData]
class Game():
constructionSites: Dict[str, ConstructionSite]
cpu: CPUType
creeps: Dict[str, Creep]
flags: Dict[str, Flag]
gcl: GLType
gpl: GLType
map: Map
market: Market
powerCreeps: Dict[str, PowerCreep]
resources: ResourcesType
rooms: Dict[str, Room]
shard: ShardType
spawns: Dict[str, StructureSpawn]
structures: Dict[str, Structure]
time: NumberType
getObjectById: Callable[[str], Union[GameObject, None]]
notify: Callable[[str, Optional[NumberType]], None]
class InterShardMemory():
getLocal: Callable[[], str]
setLocal: Callable[[str], None]
getRemote: Callable[[str], str]
class PathFinderOpts(TypedDict):
roomCallback: Optional[Callable[[str], Union[CostMatrix, bool]]]
plainCost: Optional[NumberType]
swampCost: Optional[NumberType]
flee: Optional[bool]
maxOps: Optional[NumberType]
maxRooms: Optional[NumberType]
maxCost: Optional[NumberType]
heuristicWeight: Optional[NumberType]
class Path(TypedDict):
path: List[RoomPos]
ops: NumberType
cost: NumberType
incomplete: bool
class PathFinder():
search: Callable[[RoomPos, Union[Goal, List[Goal]], Optional[PathFinderOpts]], Path]
class Effect(TypedDict):
effect: NumberType
level: Optional[NumberType]
ticksRemaining: NumberType
class RoomObject():
effects: List[Effect]
pos: RoomPos
room: Union[Room, None]
class OwnerDict(TypedDict):
username: str
class ConstructionSite(RoomObject):
id: str
my: bool
owner: OwnerDict
progress: NumberType
progressTotal: NumberType
structureType: str
remove: Callable[[], NumberType]
class CreepBodyPart(TypedDict):
boost: Union[str, None]
type: str
hits: NumberType
class DropResourceData(TypedDict):
resourceType: str
amount: NumberType
class
|
(RoomFindPathOpts):
reusePath: Optional[NumberType]
serializeMemory: Optional[bool]
noPathFinding: Optional[bool]
visualizePathStyle: Optional[RoomVisualPolyStyle]
Pos = Union[RoomObject, RoomPos]
class BaseCreep(RoomObject):
hits: NumberType
hitsMax: NumberType
id: str
memory: Any
my: bool
name: str
owner: OwnerDict
saying: str
store: Store
ticksToLive: NumberType
cancelOrder: Callable[[str], NumberType]
drop: Callable[[DropResourceData], NumberType]
move: Callable[[Union[Creep, NumberType]], NumberType]
moveByPath: Callable[[Union[list, str]], NumberType]
moveTo: Union[
Callable[[NumberType, NumberType, Optional[CreepMoveToOpts]], NumberType],
Callable[[Pos, Optional[CreepMoveToOpts]], NumberType]]
notifyWhenAttacked: Callable[[bool], NumberType]
pickup: Callable[[Resource], NumberType]
say: Callable[[str, Optional[bool]], NumberType]
suicide: Callable[[], NumberType]
transfer: Callable[[Union[Creep, PowerCreep, Structure], str, Optional[NumberType]], NumberType]
withdraw: Callable[[Union[Structure, Tombstone, Ruin], str, Optional[NumberType]], NumberType]
class Creep(BaseCreep):
body: List[CreepBodyPart]
fatigue: NumberType
spawning: bool
attack: Callable[[Union[Creep, PowerCreep, Structure]], NumberType]
attackController: Callable[[StructureController], NumberType]
build: Callable[[ConstructionSite], NumberType]
claimController: Callable[[StructureController], NumberType]
dismantle: Callable[[Structure], NumberType]
generateSafeMode: Callable[[StructureController], NumberType]
getActiveBodyparts: Callable[[str], NumberType]
harvest: Callable[[Union[Source, Mineral, Deposit]], NumberType]
heal: Callable[[Union[Creep, PowerCreep]], NumberType]
pull: Callable[[Creep], NumberType]
rangedAttack: Callable[[Union[Creep, PowerCreep, Structure]], NumberType]
rangedHeal: Callable[[Union[Creep, PowerCreep]], NumberType
|
CreepMoveToOpts
|
identifier_name
|
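Most signatures in the raw.py rows subscript typing.Callable as Callable[[argument types], return type] rather than calling it, and overloads such as moveTo are modeled as a Union of Callable signatures. A short illustrative recap with invented names:

from typing import Callable, Optional, Union

# Callable is subscripted, not called: Callable[[arg_types...], return_type].
get_object_by_id: Callable[[str], Optional[object]]

# An "overloaded" stub in the style of moveTo above: one Union of signatures.
move_to: Union[
    Callable[[int, int], int],   # (x, y) -> status code
    Callable[[object], int],     # (target) -> status code
]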
types.rs
|
vm::errors::VMResult;
use libra_types::access_path::AccessPath;
use serde::{Deserialize, Serialize};
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub is_resource: bool,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
/// VM representation of a Move type that gives access to both the fully qualified
/// name and data layout of the type.
///
/// TODO: this data structure itself is intended to be used in runtime only and
/// should NOT be serialized in any form. Currently we still derive `Serialize` and
/// `Deserialize`, but this is a hack for fuzzing and should be guarded behind the
/// "fuzzing" feature flag. We should look into ways to get rid of this.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn resource_path(&self) -> VMResult<Vec<u8>> {
Ok(AccessPath::resource_access_vec(&self.struct_tag()?))
}
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
is_resource: self.is_resource,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> VMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<VMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
debug_write!(buf, "{}::{}", self.module, self.name)?;
let mut it = self.ty_args.iter();
if let Some(ty) = it.next() {
debug_write!(buf, "<")?;
ty.debug_print(buf)?;
for ty in it {
debug_write!(buf, ", ")?;
ty.debug_print(buf)?;
}
debug_write!(buf, ">")?;
}
Ok(())
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)));
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> VMResult<TypeTag> {
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
ty @ Reference(_) | ty @ MutableReference(_) | ty @ TyParam(_) => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", ty)))
}
};
Ok(res)
}
pub fn is_resource(&self) -> VMResult<bool> {
use FatType::*;
match self {
Bool | U8 | U64 | U128 | Address | Reference(_) | MutableReference(_) => Ok(false),
Signer => Ok(true),
Vector(ty) => ty.is_resource(),
Struct(struct_ty) => Ok(struct_ty.is_resource),
// In the VM, concrete type arguments are required for type resolution and the only place
// uninstantiated type parameters can show up is the cache.
//
// Therefore `is_resource` should only be called upon types outside the cache, in which
// case it will always succeed. (Internal invariant violation otherwise.)
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot check if a type parameter is a resource or not".to_string())),
}
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
use FatType::*;
match self {
Bool => debug_write!(buf, "bool"),
U8 => debug_write!(buf, "u8"),
U64 => debug_write!(buf, "u64"),
U128 => debug_write!(buf, "u128"),
|
Signer => debug_write!(buf, "signer"),
Vector(elem_ty) => {
debug_write!(buf, "vector<")?;
elem_ty.debug_print(buf)?;
debug_write!(buf, ">")
}
Struct(struct_ty) => struct_ty.debug_print(buf),
Reference(ty) => {
debug_write!(buf, "&")?;
ty.debug_print(buf)
}
MutableReference(ty) => {
debug_write!(buf, "&mut ")?;
ty.debug_print(buf)
}
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot print out uninstantiated type params".to_string())),
}
}
}
#[cfg(feature = "fuzzing")]
pub mod prop {
use super::*;
use proptest::{collection::vec, prelude::*};
impl FatType {
/// Generate a random primitive Type, no Struct or Vector.
pub fn single_value_strategy() -> impl Strategy<Value = Self> {
use FatType::*;
prop_oneof![
Just(Bool),
Just(U8),
Just(U64),
Just(U128),
Just(Address),
Just(Signer)
]
}
/// Generate a primitive Value, a Struct or a Vector.
pub fn nested_strategy(
depth: u32,
desired_size: u32,
expected_branch_size: u32,
) -> impl Strategy<Value = Self> {
use FatType::*;
let leaf = Self::single_value_strategy();
leaf.prop_recursive(depth, desired_size, expected_branch_size, |inner| {
prop_oneof![
inner
.clone()
.prop_map(|layout| FatType::Vector(Box::new(layout))),
(
any::<AccountAddress>(),
any::<Identifier>(),
any::<Identifier>(),
any::<bool>(),
vec(inner.clone(), 0..4),
vec(inner, 0..10)
)
.prop_map(
|(address, module, name, is_resource, ty_args, layout)| Struct(
Box::new(FatStructType {
address,
module,
name,
is_resource,
ty_args,
layout,
})
)
),
]
})
}
}
impl Arbitrary for FatType {
type Parameters = ();
fn arbitrary_with(_args: ()) -> Self::Strategy {
Self::nested_strategy(3, 20, 10).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
))
}
}
impl Try
|
Address => debug_write!(buf, "address"),
|
random_line_split
|
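FatType::type_tag in the types.rs rows recurses through Vector, passes primitives through, and rejects references and type parameters. A minimal Python analogue of that case split (the types here are invented for the sketch and are not the Move VM API):

from dataclasses import dataclass
from typing import Union

@dataclass
class Vector:
    elem: "Fat"

@dataclass
class Reference:
    to: "Fat"

Fat = Union[str, Vector, Reference]  # primitives as plain strings: "bool", "u64", ...

def type_tag(ty: Fat) -> str:
    # Recurse through vectors, pass primitives through, reject references --
    # the same case split as FatType::type_tag above.
    if isinstance(ty, str):
        return ty
    if isinstance(ty, Vector):
        return f"vector<{type_tag(ty.elem)}>"
    raise ValueError(f"cannot derive type tag for {ty!r}")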
types.rs
|
::errors::VMResult;
use libra_types::access_path::AccessPath;
use serde::{Deserialize, Serialize};
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub is_resource: bool,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
/// VM representation of a Move type that gives access to both the fully qualified
/// name and data layout of the type.
///
/// TODO: this data structure itself is intended to be used in runtime only and
/// should NOT be serialized in any form. Currently we still derive `Serialize` and
/// `Deserialize`, but this is a hack for fuzzing and should be guarded behind the
/// "fuzzing" feature flag. We should look into ways to get rid of this.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn resource_path(&self) -> VMResult<Vec<u8>> {
Ok(AccessPath::resource_access_vec(&self.struct_tag()?))
}
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
is_resource: self.is_resource,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> VMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<VMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
debug_write!(buf, "{}::{}", self.module, self.name)?;
let mut it = self.ty_args.iter();
if let Some(ty) = it.next() {
debug_write!(buf, "<")?;
ty.debug_print(buf)?;
for ty in it {
debug_write!(buf, ", ")?;
ty.debug_print(buf)?;
}
debug_write!(buf, ">")?;
}
Ok(())
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)));
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> VMResult<TypeTag> {
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
ty @ Reference(_) | ty @ MutableReference(_) | ty @ TyParam(_) => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", ty)))
}
};
Ok(res)
}
pub fn
|
(&self) -> VMResult<bool> {
use FatType::*;
match self {
Bool | U8 | U64 | U128 | Address | Reference(_) | MutableReference(_) => Ok(false),
Signer => Ok(true),
Vector(ty) => ty.is_resource(),
Struct(struct_ty) => Ok(struct_ty.is_resource),
// In the VM, concrete type arguments are required for type resolution and the only place
// uninstantiated type parameters can show up is the cache.
//
// Therefore `is_resource` should only be called upon types outside the cache, in which
// case it will always succeed. (Internal invariant violation otherwise.)
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot check if a type parameter is a resource or not".to_string())),
}
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
use FatType::*;
match self {
Bool => debug_write!(buf, "bool"),
U8 => debug_write!(buf, "u8"),
U64 => debug_write!(buf, "u64"),
U128 => debug_write!(buf, "u128"),
Address => debug_write!(buf, "address"),
Signer => debug_write!(buf, "signer"),
Vector(elem_ty) => {
debug_write!(buf, "vector<")?;
elem_ty.debug_print(buf)?;
debug_write!(buf, ">")
}
Struct(struct_ty) => struct_ty.debug_print(buf),
Reference(ty) => {
debug_write!(buf, "&")?;
ty.debug_print(buf)
}
MutableReference(ty) => {
debug_write!(buf, "&mut ")?;
ty.debug_print(buf)
}
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot print out uninstantiated type params".to_string())),
}
}
}
#[cfg(feature = "fuzzing")]
pub mod prop {
use super::*;
use proptest::{collection::vec, prelude::*};
impl FatType {
/// Generate a random primitive Type, no Struct or Vector.
pub fn single_value_strategy() -> impl Strategy<Value = Self> {
use FatType::*;
prop_oneof![
Just(Bool),
Just(U8),
Just(U64),
Just(U128),
Just(Address),
Just(Signer)
]
}
/// Generate a primitive Value, a Struct or a Vector.
pub fn nested_strategy(
depth: u32,
desired_size: u32,
expected_branch_size: u32,
) -> impl Strategy<Value = Self> {
use FatType::*;
let leaf = Self::single_value_strategy();
leaf.prop_recursive(depth, desired_size, expected_branch_size, |inner| {
prop_oneof![
inner
.clone()
.prop_map(|layout| FatType::Vector(Box::new(layout))),
(
any::<AccountAddress>(),
any::<Identifier>(),
any::<Identifier>(),
any::<bool>(),
vec(inner.clone(), 0..4),
vec(inner, 0..10)
)
.prop_map(
|(address, module, name, is_resource, ty_args, layout)| Struct(
Box::new(FatStructType {
address,
module,
name,
is_resource,
ty_args,
layout,
})
)
),
]
})
}
}
impl Arbitrary for FatType {
type Parameters = ();
fn arbitrary_with(_args: ()) -> Self::Strategy {
Self::nested_strategy(3, 20, 10).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
))
}
}
impl Try
|
is_resource
|
identifier_name
|
types.rs
|
::errors::VMResult;
use libra_types::access_path::AccessPath;
use serde::{Deserialize, Serialize};
/// VM representation of a struct type in Move.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub struct FatStructType {
pub address: AccountAddress,
pub module: Identifier,
pub name: Identifier,
pub is_resource: bool,
pub ty_args: Vec<FatType>,
pub layout: Vec<FatType>,
}
/// VM representation of a Move type that gives access to both the fully qualified
/// name and data layout of the type.
///
/// TODO: this data structure itself is intended to be used in runtime only and
/// should NOT be serialized in any form. Currently we still derive `Serialize` and
/// `Deserialize`, but this is a hack for fuzzing and should be guarded behind the
/// "fuzzing" feature flag. We should look into ways to get rid of this.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[cfg_attr(feature = "fuzzing", derive(Eq, PartialEq))]
pub enum FatType {
Bool,
U8,
U64,
U128,
Address,
Signer,
Vector(Box<FatType>),
Struct(Box<FatStructType>),
Reference(Box<FatType>),
MutableReference(Box<FatType>),
TyParam(usize),
}
impl FatStructType {
pub fn resource_path(&self) -> VMResult<Vec<u8>> {
Ok(AccessPath::resource_access_vec(&self.struct_tag()?))
}
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatStructType> {
Ok(Self {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
is_resource: self.is_resource,
ty_args: self
.ty_args
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
layout: self
.layout
.iter()
.map(|ty| ty.subst(ty_args))
.collect::<VMResult<_>>()?,
})
}
pub fn struct_tag(&self) -> VMResult<StructTag> {
let ty_args = self
.ty_args
.iter()
.map(|ty| ty.type_tag())
.collect::<VMResult<Vec<_>>>()?;
Ok(StructTag {
address: self.address,
module: self.module.clone(),
name: self.name.clone(),
type_params: ty_args,
})
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
debug_write!(buf, "{}::{}", self.module, self.name)?;
let mut it = self.ty_args.iter();
if let Some(ty) = it.next() {
debug_write!(buf, "<")?;
ty.debug_print(buf)?;
for ty in it {
debug_write!(buf, ", ")?;
ty.debug_print(buf)?;
}
debug_write!(buf, ">")?;
}
Ok(())
}
}
impl FatType {
pub fn subst(&self, ty_args: &[FatType]) -> VMResult<FatType> {
use FatType::*;
let res = match self {
TyParam(idx) => match ty_args.get(*idx) {
Some(ty) => ty.clone(),
None => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!(
"fat type substitution failed: index out of bounds -- len {} got {}",
ty_args.len(),
idx
)));
}
},
Bool => Bool,
U8 => U8,
U64 => U64,
U128 => U128,
Address => Address,
Signer => Signer,
Vector(ty) => Vector(Box::new(ty.subst(ty_args)?)),
Reference(ty) => Reference(Box::new(ty.subst(ty_args)?)),
MutableReference(ty) => MutableReference(Box::new(ty.subst(ty_args)?)),
Struct(struct_ty) => Struct(Box::new(struct_ty.subst(ty_args)?)),
};
Ok(res)
}
pub fn type_tag(&self) -> VMResult<TypeTag>
|
}
pub fn is_resource(&self) -> VMResult<bool> {
use FatType::*;
match self {
Bool | U8 | U64 | U128 | Address | Reference(_) | MutableReference(_) => Ok(false),
Signer => Ok(true),
Vector(ty) => ty.is_resource(),
Struct(struct_ty) => Ok(struct_ty.is_resource),
// In the VM, concrete type arguments are required for type resolution and the only place
// uninstantiated type parameters can show up is the cache.
//
// Therefore `is_resource` should only be called upon types outside the cache, in which
// case it will always succeed. (Internal invariant violation otherwise.)
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot check if a type parameter is a resource or not".to_string())),
}
}
pub fn debug_print<B: Write>(&self, buf: &mut B) -> VMResult<()> {
use FatType::*;
match self {
Bool => debug_write!(buf, "bool"),
U8 => debug_write!(buf, "u8"),
U64 => debug_write!(buf, "u64"),
U128 => debug_write!(buf, "u128"),
Address => debug_write!(buf, "address"),
Signer => debug_write!(buf, "signer"),
Vector(elem_ty) => {
debug_write!(buf, "vector<")?;
elem_ty.debug_print(buf)?;
debug_write!(buf, ">")
}
Struct(struct_ty) => struct_ty.debug_print(buf),
Reference(ty) => {
debug_write!(buf, "&")?;
ty.debug_print(buf)
}
MutableReference(ty) => {
debug_write!(buf, "&mut ")?;
ty.debug_print(buf)
}
TyParam(_) => Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message("cannot print out uninstantiated type params".to_string())),
}
}
}
#[cfg(feature = "fuzzing")]
pub mod prop {
use super::*;
use proptest::{collection::vec, prelude::*};
impl FatType {
/// Generate a random primitive Type, no Struct or Vector.
pub fn single_value_strategy() -> impl Strategy<Value = Self> {
use FatType::*;
prop_oneof![
Just(Bool),
Just(U8),
Just(U64),
Just(U128),
Just(Address),
Just(Signer)
]
}
/// Generate a primitive Value, a Struct or a Vector.
pub fn nested_strategy(
depth: u32,
desired_size: u32,
expected_branch_size: u32,
) -> impl Strategy<Value = Self> {
use FatType::*;
let leaf = Self::single_value_strategy();
leaf.prop_recursive(depth, desired_size, expected_branch_size, |inner| {
prop_oneof![
inner
.clone()
.prop_map(|layout| FatType::Vector(Box::new(layout))),
(
any::<AccountAddress>(),
any::<Identifier>(),
any::<Identifier>(),
any::<bool>(),
vec(inner.clone(), 0..4),
vec(inner, 0..10)
)
.prop_map(
|(address, module, name, is_resource, ty_args, layout)| Struct(
Box::new(FatStructType {
address,
module,
name,
is_resource,
ty_args,
layout,
})
)
),
]
})
}
}
impl Arbitrary for FatType {
type Parameters = ();
fn arbitrary_with(_args: ()) -> Self::Strategy {
Self::nested_strategy(3, 20, 10).boxed()
}
type Strategy = BoxedStrategy<Self>;
}
}
impl TryInto<MoveStructLayout> for &FatStructType {
type Error = VMStatus;
fn try_into(self) -> Result<MoveStructLayout, Self::Error> {
Ok(MoveStructLayout::new(
self.layout
.iter()
.map(|ty| ty.try_into())
.collect::<VMResult<Vec<_>>>()?,
))
}
}
impl Try
|
{
use FatType::*;
let res = match self {
Bool => TypeTag::Bool,
U8 => TypeTag::U8,
U64 => TypeTag::U64,
U128 => TypeTag::U128,
Address => TypeTag::Address,
Signer => TypeTag::Signer,
Vector(ty) => TypeTag::Vector(Box::new(ty.type_tag()?)),
Struct(struct_ty) => TypeTag::Struct(struct_ty.struct_tag()?),
ty @ Reference(_) | ty @ MutableReference(_) | ty @ TyParam(_) => {
return Err(VMStatus::new(StatusCode::UNKNOWN_INVARIANT_VIOLATION_ERROR)
.with_message(format!("cannot derive type tag for {:?}", ty)))
}
};
Ok(res)
|
identifier_body
|
hash2curve.rs
|
5c353c75c576bf82ccc96adb63c094dde580021eddeafd91f8c0bfee6f636528f3d0c47fd2"),
u_0: hex!("480cb3ac2c389db7f9dac9c396d2647ae946db844598971c26d1afd53912a1491199c0a5902811e4b809c26fcd37a014"),
u_1: hex!("d28435eb34680e148bf3908536e42231cba9e1f73ae2c6902a222a89db5c49c97db2f8fa4d4cd6e424b17ac60bdb9bb6"),
q0_x: hex!("42e6666f505e854187186bad3011598d9278b9d6e3e4d2503c3d236381a56748dec5d139c223129b324df53fa147c4df"),
q0_y: hex!("8ee51dbda46413bf621838cc935d18d617881c6f33f3838a79c767a1e5618e34b22f79142df708d2432f75c7366c8512"),
q1_x: hex!("4ff01ceeba60484fa1bc0d825fe1e5e383d8f79f1e5bb78e5fb26b7a7ef758153e31e78b9d60ce75c5e32e43869d4e12"),
q1_y: hex!("0f84b978fac8ceda7304b47e229d6037d32062e597dc7a9b95bcd9af441f3c56c619a901d21635f9ec6ab4710b9fcd0e"),
},
];
for test_vector in TEST_VECTORS {
// in parts
let mut u = [FieldElement::default(), FieldElement::default()];
hash2curve::hash_to_field::<ExpandMsgXmd<Sha384>, FieldElement>(
&[test_vector.msg],
&[DST],
&mut u,
)
.unwrap();
/// Assert that the provided projective point matches the given test vector.
// TODO(tarcieri): use coordinate APIs. See zkcrypto/group#30
macro_rules! assert_point_eq {
($actual:expr, $expected_x:expr, $expected_y:expr) => {
let point = $actual.to_affine().to_encoded_point(false);
let (actual_x, actual_y) = match point.coordinates() {
sec1::Coordinates::Uncompressed { x, y } => (x, y),
_ => unreachable!(),
};
assert_eq!(&$expected_x, actual_x.as_slice());
assert_eq!(&$expected_y, actual_y.as_slice());
};
}
assert_eq!(u[0].to_bytes().as_slice(), test_vector.u_0);
assert_eq!(u[1].to_bytes().as_slice(), test_vector.u_1);
let q0 = u[0].map_to_curve();
assert_point_eq!(q0, test_vector.q0_x, test_vector.q0_y);
let q1 = u[1].map_to_curve();
assert_point_eq!(q1, test_vector.q1_x, test_vector.q1_y);
let p = q0.clear_cofactor() + q1.clear_cofactor();
assert_point_eq!(p, test_vector.p_x, test_vector.p_y);
// complete run
let pt = NistP384::hash_from_bytes::<ExpandMsgXmd<Sha384>>(&[test_vector.msg], &[DST])
.unwrap();
assert_point_eq!(pt, test_vector.p_x, test_vector.p_y);
}
}
/// Taken from <https://www.ietf.org/archive/id/draft-irtf-cfrg-voprf-16.html#name-oprfp-384-sha-384-2>.
#[test]
fn hash_to_scalar_voprf() {
struct TestVector {
dst: &'static [u8],
key_info: &'static [u8],
seed: &'static [u8],
sk_sm: &'static [u8],
}
const TEST_VECTORS: &[TestVector] = &[
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x00\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("c0503759ddd1e31d8c7eae9304c9b1c16f83d1f6d962e3e7b789cd85fd581800e96c5c4256131aafcff9a76919abbd55"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x01\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("514fb6fe2e66af1383840759d56f71730331280f062930ee2a2f7ea42f935acf94087355699d788abfdf09d19a5c85ac"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x02\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("0fcba4a204f67d6c13f780e613915f755319aaa3cb03cd20a5a4a6c403a4812a4fff5d3223e2c309aa66b05cb7611fd4"),
},
];
'outer: for test_vector in TEST_VECTORS {
let key_info_len = u16::try_from(test_vector.key_info.len())
.unwrap()
.to_be_bytes();
for counter in 0_u8..=u8::MAX {
let scalar = NistP384::hash_to_scalar::<ExpandMsgXmd<Sha384>>(
&[
test_vector.seed,
&key_info_len,
test_vector.key_info,
&counter.to_be_bytes(),
],
&[test_vector.dst],
)
.unwrap();
if !bool::from(scalar.is_zero())
|
{
assert_eq!(scalar.to_bytes().as_slice(), test_vector.sk_sm);
continue 'outer;
}
|
conditional_block
|
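The VOPRF test above derives a key by hashing seed || I2OSP(len(key_info), 2) || key_info || counter and retrying the counter until a non-zero scalar appears. The sketch below keeps only that control flow; plain SHA-384 reduction stands in for the real hash_to_scalar ciphersuite (expand_message_xmd), so it will not reproduce the sk_sm vectors.

import hashlib

# Order of the P-384 group, used only to reduce the digest into scalar range.
P384_ORDER = int(
    "ffffffffffffffffffffffffffffffffffffffffffffffffc7634d81f4372ddf"
    "581a0db248b0a77aecec196accc52973", 16)

def derive_scalar(seed: bytes, key_info: bytes, dst: bytes) -> int:
    # Same counter/rejection structure as the test loop above; hashlib.sha384
    # is a stand-in for the real hash_to_scalar suite, not a compatible one.
    msg = seed + len(key_info).to_bytes(2, "big") + key_info
    for counter in range(256):
        digest = hashlib.sha384(dst + msg + bytes([counter])).digest()
        candidate = int.from_bytes(digest, "big") % P384_ORDER
        if candidate != 0:
            return candidate
    raise ValueError("no non-zero scalar found after 256 attempts")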
|
hash2curve.rs
|
1d1392b00df0f5400c06"),
q1_x: hex!("6ad7bc8ed8b841efd8ad0765c8a23d0b968ec9aa360a558ff33500f164faa02bee6c704f5f91507c4c5aad2b0dc5b943"),
q1_y: hex!("47313cc0a873ade774048338fc34ca5313f96bbf6ae22ac6ef475d85f03d24792dc6afba8d0b4a70170c1b4f0f716629"),
},
TestVector {
msg: b"a512_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
p_x: hex!("7b18d210b1f090ac701f65f606f6ca18fb8d081e3bc6cbd937c5604325f1cdea4c15c10a54ef303aabf2ea58bd9947a4"),
p_y: hex!("ea857285a33abb516732915c353c75c576bf82ccc96adb63c094dde580021eddeafd91f8c0bfee6f636528f3d0c47fd2"),
u_0: hex!("480cb3ac2c389db7f9dac9c396d2647ae946db844598971c26d1afd53912a1491199c0a5902811e4b809c26fcd37a014"),
u_1: hex!("d28435eb34680e148bf3908536e42231cba9e1f73ae2c6902a222a89db5c49c97db2f8fa4d4cd6e424b17ac60bdb9bb6"),
q0_x: hex!("42e6666f505e854187186bad3011598d9278b9d6e3e4d2503c3d236381a56748dec5d139c223129b324df53fa147c4df"),
q0_y: hex!("8ee51dbda46413bf621838cc935d18d617881c6f33f3838a79c767a1e5618e34b22f79142df708d2432f75c7366c8512"),
q1_x: hex!("4ff01ceeba60484fa1bc0d825fe1e5e383d8f79f1e5bb78e5fb26b7a7ef758153e31e78b9d60ce75c5e32e43869d4e12"),
q1_y: hex!("0f84b978fac8ceda7304b47e229d6037d32062e597dc7a9b95bcd9af441f3c56c619a901d21635f9ec6ab4710b9fcd0e"),
},
];
for test_vector in TEST_VECTORS {
// in parts
let mut u = [FieldElement::default(), FieldElement::default()];
hash2curve::hash_to_field::<ExpandMsgXmd<Sha384>, FieldElement>(
&[test_vector.msg],
&[DST],
&mut u,
)
.unwrap();
/// Assert that the provided projective point matches the given test vector.
// TODO(tarcieri): use coordinate APIs. See zkcrypto/group#30
macro_rules! assert_point_eq {
($actual:expr, $expected_x:expr, $expected_y:expr) => {
let point = $actual.to_affine().to_encoded_point(false);
let (actual_x, actual_y) = match point.coordinates() {
sec1::Coordinates::Uncompressed { x, y } => (x, y),
_ => unreachable!(),
};
assert_eq!(&$expected_x, actual_x.as_slice());
assert_eq!(&$expected_y, actual_y.as_slice());
};
}
assert_eq!(u[0].to_bytes().as_slice(), test_vector.u_0);
assert_eq!(u[1].to_bytes().as_slice(), test_vector.u_1);
let q0 = u[0].map_to_curve();
assert_point_eq!(q0, test_vector.q0_x, test_vector.q0_y);
let q1 = u[1].map_to_curve();
assert_point_eq!(q1, test_vector.q1_x, test_vector.q1_y);
let p = q0.clear_cofactor() + q1.clear_cofactor();
assert_point_eq!(p, test_vector.p_x, test_vector.p_y);
// complete run
let pt = NistP384::hash_from_bytes::<ExpandMsgXmd<Sha384>>(&[test_vector.msg], &[DST])
.unwrap();
assert_point_eq!(pt, test_vector.p_x, test_vector.p_y);
}
}
/// Taken from <https://www.ietf.org/archive/id/draft-irtf-cfrg-voprf-16.html#name-oprfp-384-sha-384-2>.
#[test]
fn hash_to_scalar_voprf()
|
{
struct TestVector {
dst: &'static [u8],
key_info: &'static [u8],
seed: &'static [u8],
sk_sm: &'static [u8],
}
const TEST_VECTORS: &[TestVector] = &[
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x00\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("c0503759ddd1e31d8c7eae9304c9b1c16f83d1f6d962e3e7b789cd85fd581800e96c5c4256131aafcff9a76919abbd55"),
},
TestVector {
dst: b"DeriveKeyPairVOPRF10-\x01\x00\x04",
key_info: b"test key",
seed: &hex!("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3"),
sk_sm: &hex!("514fb6fe2e66af1383840759d56f71730331280f062930ee2a2f7ea42f935acf94087355699d788abfdf09d19a5c85ac"),
|
identifier_body
|
|
hash2curve.rs
|
const DST: &[u8] = b"QUUX-V01-CS02-with-P384_XMD:SHA-384_SSWU_RO_";
const TEST_VECTORS: &[TestVector] = &[
TestVector {
msg: b"",
p_x: hex!("eb9fe1b4f4e14e7140803c1d99d0a93cd823d2b024040f9c067a8eca1f5a2eeac9ad604973527a356f3fa3aeff0e4d83"),
p_y: hex!("0c21708cff382b7f4643c07b105c2eaec2cead93a917d825601e63c8f21f6abd9abc22c93c2bed6f235954b25048bb1a"),
u_0: hex!("25c8d7dc1acd4ee617766693f7f8829396065d1b447eedb155871feffd9c6653279ac7e5c46edb7010a0e4ff64c9f3b4"),
u_1: hex!("59428be4ed69131df59a0c6a8e188d2d4ece3f1b2a3a02602962b47efa4d7905945b1e2cc80b36aa35c99451073521ac"),
q0_x: hex!("e4717e29eef38d862bee4902a7d21b44efb58c464e3e1f0d03894d94de310f8ffc6de86786dd3e15a1541b18d4eb2846"),
q0_y: hex!("6b95a6e639822312298a47526bb77d9cd7bcf76244c991c8cd70075e2ee6e8b9a135c4a37e3c0768c7ca871c0ceb53d4"),
q1_x: hex!("509527cfc0750eedc53147e6d5f78596c8a3b7360e0608e2fab0563a1670d58d8ae107c9f04bcf90e89489ace5650efd"),
q1_y: hex!("33337b13cb35e173fdea4cb9e8cce915d836ff57803dbbeb7998aa49d17df2ff09b67031773039d09fbd9305a1566bc4"),
},
TestVector {
msg: b"abc",
p_x: hex!("e02fc1a5f44a7519419dd314e29863f30df55a514da2d655775a81d413003c4d4e7fd59af0826dfaad4200ac6f60abe1"),
p_y: hex!("01f638d04d98677d65bef99aef1a12a70a4cbb9270ec55248c04530d8bc1f8f90f8a6a859a7c1f1ddccedf8f96d675f6"),
u_0: hex!("53350214cb6bef0b51abb791b1c4209a2b4c16a0c67e1ab1401017fad774cd3b3f9a8bcdf7f6229dd8dd5a075cb149a0"),
u_1: hex!("c0473083898f63e03f26f14877a2407bd60c75ad491e7d26cbc6cc5ce815654075ec6b6898c7a41d74ceaf720a10c02e"),
q0_x: hex!("fc853b69437aee9a19d5acf96a4ee4c5e04cf7b53406dfaa2afbdd7ad2351b7f554e4bbc6f5db4177d4d44f933a8f6ee"),
q0_y: hex!("7e042547e01834c9043b10f3a8221c4a879cb156f04f72bfccab0c047a304e30f2aa8b2e260d34c4592c0c33dd0c6482"),
q1_x: hex!("57912293709b3556b43a2dfb137a315d256d573b82ded120ef8c782d607c05d930d958e50cb6dc1cc480b9afc38c45f1"),
q1_y: hex!("de9387dab0eef0bda219c6f168a92645a84665c4f2137c14270fb424b7532ff84843c3da383ceea24c47fa343c227bb8"),
},
TestVector {
msg: b"abcdef0123456789",
p_x: hex!("bdecc1c1d870624965f19505be50459d363c71a699a496ab672f9a5d6b78676400926fbceee6fcd1780fe86e62b2aa89"),
p_y: hex!("57cf1f99b5ee00f3c201139b3bfe4dd30a653193778d89a0accc5e0f47e46e4e4b85a0595da29c9494c1814acafe183c"),
u_0: hex!("aab7fb87238cf6b2ab56cdcca7e028959bb2ea599d34f68484139dde85ec6548a6e48771d17956421bdb7790598ea52e"),
u_1: hex!("26e8d833552d7844d167833ca5a87c35bcfaa5a0d86023479fb28e5cd6075c18b168bf1f5d2a0ea146d057971336d8d1"),
q0_x: hex!("0ceece45b73f89844671df962ad2932122e878ad2259e650626924e4e7f13258934
|
q1_x: [u8; 48],
q1_y: [u8; 48],
}
|
random_line_split
|
|
hash2curve.rs
|
() {
struct TestVector {
msg: &'static [u8],
p_x: [u8; 48],
p_y: [u8; 48],
u_0: [u8; 48],
u_1: [u8; 48],
q0_x: [u8; 48],
q0_y: [u8; 48],
q1_x: [u8; 48],
q1_y: [u8; 48],
}
const DST: &[u8] = b"QUUX-V01-CS02-with-P384_XMD:SHA-384_SSWU_RO_";
const TEST_VECTORS: &[TestVector] = &[
TestVector {
msg: b"",
p_x: hex!("eb9fe1b4f4e14e7140803c1d99d0a93cd823d2b024040f9c067a8eca1f5a2eeac9ad604973527a356f3fa3aeff0e4d83"),
p_y: hex!("0c21708cff382b7f4643c07b105c2eaec2cead93a917d825601e63c8f21f6abd9abc22c93c2bed6f235954b25048bb1a"),
u_0: hex!("25c8d7dc1acd4ee617766693f7f8829396065d1b447eedb155871feffd9c6653279ac7e5c46edb7010a0e4ff64c9f3b4"),
u_1: hex!("59428be4ed69131df59a0c6a8e188d2d4ece3f1b2a3a02602962b47efa4d7905945b1e2cc80b36aa35c99451073521ac"),
q0_x: hex!("e4717e29eef38d862bee4902a7d21b44efb58c464e3e1f0d03894d94de310f8ffc6de86786dd3e15a1541b18d4eb2846"),
q0_y: hex!("6b95a6e639822312298a47526bb77d9cd7bcf76244c991c8cd70075e2ee6e8b9a135c4a37e3c0768c7ca871c0ceb53d4"),
q1_x: hex!("509527cfc0750eedc53147e6d5f78596c8a3b7360e0608e2fab0563a1670d58d8ae107c9f04bcf90e89489ace5650efd"),
q1_y: hex!("33337b13cb35e173fdea4cb9e8cce915d836ff57803dbbeb7998aa49d17df2ff09b67031773039d09fbd9305a1566bc4"),
},
TestVector {
msg: b"abc",
p_x: hex!("e02fc1a5f44a7519419dd314e29863f30df55a514da2d655775a81d413003c4d4e7fd59af0826dfaad4200ac6f60abe1"),
p_y: hex!("01f638d04d98677d65bef99aef1a12a70a4cbb9270ec55248c04530d8bc1f8f90f8a6a859a7c1f1ddccedf8f96d675f6"),
u_0: hex!("53350214cb6bef0b51abb791b1c4209a2b4c16a0c67e1ab1401017fad774cd3b3f9a8bcdf7f6229dd8dd5a075cb149a0"),
u_1: hex!("c0473083898f63e03f26f14877a2407bd60c75ad491e7d26cbc6cc5ce815654075ec6b6898c7a41d74ceaf720a10c02e"),
q0_x: hex!("fc853b69437aee9a19d5acf96a4ee4c5e04cf7b53406dfaa2afbdd7ad2351b7f554e4bbc6f5db4177d4d44f933a8f6ee"),
q0_y: hex!("7e042547e01834c9043b10f3a8221c4a879cb156f04f72bfccab0c047a304e30f2aa8b2e260d34c4592c0c33dd0c6482"),
q1_x: hex!("57912293709b3556b43a2dfb137a315d256d573b82ded120ef8c782d607c05d930d958e50cb6dc1cc480b9afc38c45f1"),
q1_y: hex!("de9387dab0eef0bda219c6f168a92645a84665c4f2137c14270fb424b7532ff84843c3da383ceea24c47fa343c227bb8"),
},
TestVector {
msg: b"abcdef0123456789",
p_x: hex!("bdecc1c1d870624965f19505be50459d363c71a699a496ab672f9a5d6b78676400926fbceee6fcd1780fe86e62b2aa89"),
p_y: hex!("57cf1f99b5ee00f3c201139b3bfe4dd30a653193778d89a0accc5e0f47e46e4e4b85a0595da29c9494c1814acafe183c"),
u_0: hex!("aab7fb87238cf6b2ab56cdcca7e028959bb2ea599d34f68484139dde85ec6548a6e48771d17956421bdb7790598ea52e"),
u_1: hex!("26e8d833552d7844d167833ca5a87c35bcfaa5a0d86023479fb28e5cd6075c18b168bf
|
hash_to_curve
|
identifier_name
|
|
load_config.go
|
2p priv key: %w", err)
}
conf.Priv = p
if err := loadListenOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p listen options: %w", err)
}
if err := loadDiscoveryOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p discovery options: %w", err)
}
if err := loadLibp2pOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p options: %w", err)
}
if err := loadGossipOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p gossip options: %w", err)
}
if err := loadScoringParams(conf, ctx, rollupCfg); err != nil {
return nil, fmt.Errorf("failed to load p2p peer scoring options: %w", err)
}
if err := loadBanningOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load banning option: %w", err)
}
conf.EnableReqRespSync = ctx.Bool(flags.SyncReqRespFlag.Name)
return conf, nil
}
func
|
(p uint) (uint16, error) {
if p == 0 {
return 0, nil
}
if p >= (1 << 16) {
return 0, fmt.Errorf("port out of range: %d", p)
}
if p < 1024 {
return 0, fmt.Errorf("port is reserved for system: %d", p)
}
return uint16(p), nil
}
// loadScoringParams loads the peer scoring options from the CLI context.
func loadScoringParams(conf *p2p.Config, ctx *cli.Context, rollupCfg *rollup.Config) error {
scoringLevel := ctx.String(flags.Scoring.Name)
// Check old names for backwards compatibility
if scoringLevel == "" {
scoringLevel = ctx.String(flags.PeerScoring.Name)
}
if scoringLevel == "" {
scoringLevel = ctx.String(flags.TopicScoring.Name)
}
if scoringLevel != "" {
params, err := p2p.GetScoringParams(scoringLevel, rollupCfg)
if err != nil {
return err
}
conf.ScoringParams = params
}
return nil
}
// loadBanningOptions loads whether or not to ban peers from the CLI context.
func loadBanningOptions(conf *p2p.Config, ctx *cli.Context) error {
conf.BanningEnabled = ctx.Bool(flags.Banning.Name)
conf.BanningThreshold = ctx.Float64(flags.BanningThreshold.Name)
conf.BanningDuration = ctx.Duration(flags.BanningDuration.Name)
return nil
}
func loadListenOpts(conf *p2p.Config, ctx *cli.Context) error {
listenIP := ctx.String(flags.ListenIP.Name)
if listenIP != "" { // optional
conf.ListenIP = net.ParseIP(listenIP)
if conf.ListenIP == nil {
return fmt.Errorf("failed to parse IP %q", listenIP)
}
}
var err error
conf.ListenTCPPort, err = validatePort(ctx.Uint(flags.ListenTCPPort.Name))
if err != nil {
return fmt.Errorf("bad listen TCP port: %w", err)
}
conf.ListenUDPPort, err = validatePort(ctx.Uint(flags.ListenUDPPort.Name))
if err != nil {
return fmt.Errorf("bad listen UDP port: %w", err)
}
return nil
}
func loadDiscoveryOpts(conf *p2p.Config, ctx *cli.Context) error {
if ctx.Bool(flags.NoDiscovery.Name) {
conf.NoDiscovery = true
}
var err error
conf.AdvertiseTCPPort, err = validatePort(ctx.Uint(flags.AdvertiseTCPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised TCP port: %w", err)
}
conf.AdvertiseUDPPort, err = validatePort(ctx.Uint(flags.AdvertiseUDPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised UDP port: %w", err)
}
adIP := ctx.String(flags.AdvertiseIP.Name)
if adIP != "" { // optional
ips, err := net.LookupIP(adIP)
if err != nil {
return fmt.Errorf("failed to lookup IP of %q to advertise in ENR: %w", adIP, err)
}
// Find the first v4 IP it resolves to
for _, ip := range ips {
if ipv4 := ip.To4(); ipv4 != nil {
conf.AdvertiseIP = ipv4
break
}
}
if conf.AdvertiseIP == nil {
return fmt.Errorf("failed to parse IP %q", adIP)
}
}
dbPath := ctx.String(flags.DiscoveryPath.Name)
if dbPath == "" {
dbPath = "opnode_discovery_db"
}
if dbPath == "memory" {
dbPath = ""
}
conf.DiscoveryDB, err = enode.OpenDB(dbPath)
if err != nil {
return fmt.Errorf("failed to open discovery db: %w", err)
}
bootnodes := make([]*enode.Node, 0)
records := strings.Split(ctx.String(flags.Bootnodes.Name), ",")
for i, recordB64 := range records {
recordB64 = strings.TrimSpace(recordB64)
if recordB64 == "" { // ignore empty records
continue
}
nodeRecord, err := enode.Parse(enode.ValidSchemes, recordB64)
if err != nil {
return fmt.Errorf("bootnode record %d (of %d) is invalid: %q err: %w", i, len(records), recordB64, err)
}
bootnodes = append(bootnodes, nodeRecord)
}
if len(bootnodes) > 0 {
conf.Bootnodes = bootnodes
} else {
conf.Bootnodes = p2p.DefaultBootnodes
}
netRestrict, err := netutil.ParseNetlist(ctx.String(flags.NetRestrict.Name))
if err != nil {
return fmt.Errorf("failed to parse net list: %w", err)
}
conf.NetRestrict = netRestrict
return nil
}
func loadLibp2pOpts(conf *p2p.Config, ctx *cli.Context) error {
addrs := strings.Split(ctx.String(flags.StaticPeers.Name), ",")
for i, addr := range addrs {
addr = strings.TrimSpace(addr)
if addr == "" {
continue // skip empty multi addrs
}
a, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return fmt.Errorf("failed to parse multi addr of static peer %d (out of %d): %q err: %w", i, len(addrs), addr, err)
}
conf.StaticPeers = append(conf.StaticPeers, a)
}
for _, v := range strings.Split(ctx.String(flags.HostMux.Name), ",") {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "yamux":
conf.HostMux = append(conf.HostMux, p2p.YamuxC())
case "mplex":
conf.HostMux = append(conf.HostMux, p2p.MplexC())
default:
return fmt.Errorf("could not recognize mux %s", v)
}
}
secArr := strings.Split(ctx.String(flags.HostSecurity.Name), ",")
for _, v := range secArr {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "none": // no security, for debugging etc.
if len(conf.HostSecurity) > 0 || len(secArr) > 1 {
return errors.New("cannot mix secure transport protocols with no-security")
}
conf.NoTransportSecurity = true
case "noise":
conf.HostSecurity = append(conf.HostSecurity, p2p.NoiseC())
case "tls":
conf.HostSecurity = append(conf.HostSecurity, p2p.TlsC())
default:
return fmt.Errorf("could not recognize security %s", v)
}
}
conf.PeersLo = ctx.Uint(flags.PeersLo.Name)
conf.PeersHi = ctx.Uint(flags.PeersHi.Name)
conf.PeersGrace = ctx.Duration(flags.PeersGrace.Name)
conf.NAT = ctx.Bool(flags.NAT.Name)
conf.UserAgent = ctx.String(flags.UserAgent.Name)
conf.TimeoutNegotiation = ctx.Duration(flags.TimeoutNegotiation.Name)
conf.TimeoutAccept = ctx.Duration(flags.TimeoutAccept.Name)
conf.TimeoutDial = ctx.Duration(flags.TimeoutDial.Name)
peerstorePath := ctx.String(flags.PeerstorePath.Name)
if peerstorePath == "" {
return errors.New("peerstore path must be specified, use 'memory' to explicitly not persist peer records")
}
var err error
var store ds.Batching
if peerstorePath == "memory" {
store = sync.MutexWrap(ds.NewMapDatastore())
} else {
store, err = leveldb.NewDatastore(peerstorePath, nil) // default leveldb options are fine
if err != nil {
return fmt.Errorf("failed to open leveldb db for peerstore: %w", err)
}
}
conf.Store = store
return nil
}
func loadNetworkPrivKey(ctx *cli.Context) (*crypto.Sec
|
validatePort
|
identifier_name
|
load_config.go
|
p priv key: %w", err)
}
conf.Priv = p
if err := loadListenOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p listen options: %w", err)
}
if err := loadDiscoveryOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p discovery options: %w", err)
}
if err := loadLibp2pOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p options: %w", err)
}
if err := loadGossipOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p gossip options: %w", err)
}
if err := loadScoringParams(conf, ctx, rollupCfg); err != nil {
return nil, fmt.Errorf("failed to load p2p peer scoring options: %w", err)
}
if err := loadBanningOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load banning option: %w", err)
}
conf.EnableReqRespSync = ctx.Bool(flags.SyncReqRespFlag.Name)
return conf, nil
}
func validatePort(p uint) (uint16, error)
|
// loadScoringParams loads the peer scoring options from the CLI context.
func loadScoringParams(conf *p2p.Config, ctx *cli.Context, rollupCfg *rollup.Config) error {
scoringLevel := ctx.String(flags.Scoring.Name)
// Check old names for backwards compatibility
if scoringLevel == "" {
scoringLevel = ctx.String(flags.PeerScoring.Name)
}
if scoringLevel == "" {
scoringLevel = ctx.String(flags.TopicScoring.Name)
}
if scoringLevel != "" {
params, err := p2p.GetScoringParams(scoringLevel, rollupCfg)
if err != nil {
return err
}
conf.ScoringParams = params
}
return nil
}
// loadBanningOptions loads whether or not to ban peers from the CLI context.
func loadBanningOptions(conf *p2p.Config, ctx *cli.Context) error {
conf.BanningEnabled = ctx.Bool(flags.Banning.Name)
conf.BanningThreshold = ctx.Float64(flags.BanningThreshold.Name)
conf.BanningDuration = ctx.Duration(flags.BanningDuration.Name)
return nil
}
func loadListenOpts(conf *p2p.Config, ctx *cli.Context) error {
listenIP := ctx.String(flags.ListenIP.Name)
if listenIP != "" { // optional
conf.ListenIP = net.ParseIP(listenIP)
if conf.ListenIP == nil {
return fmt.Errorf("failed to parse IP %q", listenIP)
}
}
var err error
conf.ListenTCPPort, err = validatePort(ctx.Uint(flags.ListenTCPPort.Name))
if err != nil {
return fmt.Errorf("bad listen TCP port: %w", err)
}
conf.ListenUDPPort, err = validatePort(ctx.Uint(flags.ListenUDPPort.Name))
if err != nil {
return fmt.Errorf("bad listen UDP port: %w", err)
}
return nil
}
func loadDiscoveryOpts(conf *p2p.Config, ctx *cli.Context) error {
if ctx.Bool(flags.NoDiscovery.Name) {
conf.NoDiscovery = true
}
var err error
conf.AdvertiseTCPPort, err = validatePort(ctx.Uint(flags.AdvertiseTCPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised TCP port: %w", err)
}
conf.AdvertiseUDPPort, err = validatePort(ctx.Uint(flags.AdvertiseUDPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised UDP port: %w", err)
}
adIP := ctx.String(flags.AdvertiseIP.Name)
if adIP != "" { // optional
ips, err := net.LookupIP(adIP)
if err != nil {
return fmt.Errorf("failed to lookup IP of %q to advertise in ENR: %w", adIP, err)
}
// Find the first v4 IP it resolves to
for _, ip := range ips {
if ipv4 := ip.To4(); ipv4 != nil {
conf.AdvertiseIP = ipv4
break
}
}
if conf.AdvertiseIP == nil {
return fmt.Errorf("failed to parse IP %q", adIP)
}
}
dbPath := ctx.String(flags.DiscoveryPath.Name)
if dbPath == "" {
dbPath = "opnode_discovery_db"
}
if dbPath == "memory" {
dbPath = ""
}
conf.DiscoveryDB, err = enode.OpenDB(dbPath)
if err != nil {
return fmt.Errorf("failed to open discovery db: %w", err)
}
bootnodes := make([]*enode.Node, 0)
records := strings.Split(ctx.String(flags.Bootnodes.Name), ",")
for i, recordB64 := range records {
recordB64 = strings.TrimSpace(recordB64)
if recordB64 == "" { // ignore empty records
continue
}
nodeRecord, err := enode.Parse(enode.ValidSchemes, recordB64)
if err != nil {
return fmt.Errorf("bootnode record %d (of %d) is invalid: %q err: %w", i, len(records), recordB64, err)
}
bootnodes = append(bootnodes, nodeRecord)
}
if len(bootnodes) > 0 {
conf.Bootnodes = bootnodes
} else {
conf.Bootnodes = p2p.DefaultBootnodes
}
netRestrict, err := netutil.ParseNetlist(ctx.String(flags.NetRestrict.Name))
if err != nil {
return fmt.Errorf("failed to parse net list: %w", err)
}
conf.NetRestrict = netRestrict
return nil
}
func loadLibp2pOpts(conf *p2p.Config, ctx *cli.Context) error {
addrs := strings.Split(ctx.String(flags.StaticPeers.Name), ",")
for i, addr := range addrs {
addr = strings.TrimSpace(addr)
if addr == "" {
continue // skip empty multi addrs
}
a, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return fmt.Errorf("failed to parse multi addr of static peer %d (out of %d): %q err: %w", i, len(addrs), addr, err)
}
conf.StaticPeers = append(conf.StaticPeers, a)
}
for _, v := range strings.Split(ctx.String(flags.HostMux.Name), ",") {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "yamux":
conf.HostMux = append(conf.HostMux, p2p.YamuxC())
case "mplex":
conf.HostMux = append(conf.HostMux, p2p.MplexC())
default:
return fmt.Errorf("could not recognize mux %s", v)
}
}
secArr := strings.Split(ctx.String(flags.HostSecurity.Name), ",")
for _, v := range secArr {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "none": // no security, for debugging etc.
if len(conf.HostSecurity) > 0 || len(secArr) > 1 {
return errors.New("cannot mix secure transport protocols with no-security")
}
conf.NoTransportSecurity = true
case "noise":
conf.HostSecurity = append(conf.HostSecurity, p2p.NoiseC())
case "tls":
conf.HostSecurity = append(conf.HostSecurity, p2p.TlsC())
default:
return fmt.Errorf("could not recognize security %s", v)
}
}
conf.PeersLo = ctx.Uint(flags.PeersLo.Name)
conf.PeersHi = ctx.Uint(flags.PeersHi.Name)
conf.PeersGrace = ctx.Duration(flags.PeersGrace.Name)
conf.NAT = ctx.Bool(flags.NAT.Name)
conf.UserAgent = ctx.String(flags.UserAgent.Name)
conf.TimeoutNegotiation = ctx.Duration(flags.TimeoutNegotiation.Name)
conf.TimeoutAccept = ctx.Duration(flags.TimeoutAccept.Name)
conf.TimeoutDial = ctx.Duration(flags.TimeoutDial.Name)
peerstorePath := ctx.String(flags.PeerstorePath.Name)
if peerstorePath == "" {
return errors.New("peerstore path must be specified, use 'memory' to explicitly not persist peer records")
}
var err error
var store ds.Batching
if peerstorePath == "memory" {
store = sync.MutexWrap(ds.NewMapDatastore())
} else {
store, err = leveldb.NewDatastore(peerstorePath, nil) // default leveldb options are fine
if err != nil {
return fmt.Errorf("failed to open leveldb db for peerstore: %w", err)
}
}
conf.Store = store
return nil
}
func loadNetworkPrivKey(ctx *cli.Context) (*crypto.Sec
|
{
if p == 0 {
return 0, nil
}
if p >= (1 << 16) {
return 0, fmt.Errorf("port out of range: %d", p)
}
if p < 1024 {
return 0, fmt.Errorf("port is reserved for system: %d", p)
}
return uint16(p), nil
}
|
identifier_body
|
load_config.go
|
p priv key: %w", err)
}
conf.Priv = p
if err := loadListenOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p listen options: %w", err)
}
if err := loadDiscoveryOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p discovery options: %w", err)
}
if err := loadLibp2pOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p options: %w", err)
}
if err := loadGossipOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p gossip options: %w", err)
}
if err := loadScoringParams(conf, ctx, rollupCfg); err != nil {
return nil, fmt.Errorf("failed to load p2p peer scoring options: %w", err)
}
if err := loadBanningOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load banning option: %w", err)
}
conf.EnableReqRespSync = ctx.Bool(flags.SyncReqRespFlag.Name)
return conf, nil
}
func validatePort(p uint) (uint16, error) {
if p == 0 {
return 0, nil
}
if p >= (1 << 16) {
return 0, fmt.Errorf("port out of range: %d", p)
}
if p < 1024 {
return 0, fmt.Errorf("port is reserved for system: %d", p)
}
return uint16(p), nil
}
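// Illustrative examples of validatePort behavior (hypothetical inputs, added for clarity):
//   validatePort(0)     -> 0, nil       (0 means "unset" and is passed through)
//   validatePort(80)    -> error        (ports below 1024 are reserved for the system)
//   validatePort(9222)  -> 9222, nil
//   validatePort(70000) -> error        (outside the uint16 range)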
// loadScoringParams loads the peer scoring options from the CLI context.
func loadScoringParams(conf *p2p.Config, ctx *cli.Context, rollupCfg *rollup.Config) error {
scoringLevel := ctx.String(flags.Scoring.Name)
// Check old names for backwards compatibility
if scoringLevel == "" {
scoringLevel = ctx.String(flags.PeerScoring.Name)
}
if scoringLevel == "" {
scoringLevel = ctx.String(flags.TopicScoring.Name)
}
if scoringLevel != "" {
params, err := p2p.GetScoringParams(scoringLevel, rollupCfg)
if err != nil {
return err
}
conf.ScoringParams = params
}
return nil
}
// loadBanningOptions loads whether or not to ban peers from the CLI context.
func loadBanningOptions(conf *p2p.Config, ctx *cli.Context) error {
conf.BanningEnabled = ctx.Bool(flags.Banning.Name)
conf.BanningThreshold = ctx.Float64(flags.BanningThreshold.Name)
conf.BanningDuration = ctx.Duration(flags.BanningDuration.Name)
return nil
}
func loadListenOpts(conf *p2p.Config, ctx *cli.Context) error {
listenIP := ctx.String(flags.ListenIP.Name)
if listenIP != "" { // optional
conf.ListenIP = net.ParseIP(listenIP)
if conf.ListenIP == nil {
return fmt.Errorf("failed to parse IP %q", listenIP)
}
}
var err error
conf.ListenTCPPort, err = validatePort(ctx.Uint(flags.ListenTCPPort.Name))
if err != nil {
return fmt.Errorf("bad listen TCP port: %w", err)
}
conf.ListenUDPPort, err = validatePort(ctx.Uint(flags.ListenUDPPort.Name))
if err != nil {
return fmt.Errorf("bad listen UDP port: %w", err)
}
return nil
}
func loadDiscoveryOpts(conf *p2p.Config, ctx *cli.Context) error {
if ctx.Bool(flags.NoDiscovery.Name)
|
var err error
conf.AdvertiseTCPPort, err = validatePort(ctx.Uint(flags.AdvertiseTCPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised TCP port: %w", err)
}
conf.AdvertiseUDPPort, err = validatePort(ctx.Uint(flags.AdvertiseUDPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised UDP port: %w", err)
}
adIP := ctx.String(flags.AdvertiseIP.Name)
if adIP != "" { // optional
ips, err := net.LookupIP(adIP)
if err != nil {
return fmt.Errorf("failed to lookup IP of %q to advertise in ENR: %w", adIP, err)
}
// Find the first v4 IP it resolves to
for _, ip := range ips {
if ipv4 := ip.To4(); ipv4 != nil {
conf.AdvertiseIP = ipv4
break
}
}
if conf.AdvertiseIP == nil {
return fmt.Errorf("failed to parse IP %q", adIP)
}
}
dbPath := ctx.String(flags.DiscoveryPath.Name)
if dbPath == "" {
dbPath = "opnode_discovery_db"
}
if dbPath == "memory" {
dbPath = ""
}
conf.DiscoveryDB, err = enode.OpenDB(dbPath)
if err != nil {
return fmt.Errorf("failed to open discovery db: %w", err)
}
bootnodes := make([]*enode.Node, 0)
records := strings.Split(ctx.String(flags.Bootnodes.Name), ",")
for i, recordB64 := range records {
recordB64 = strings.TrimSpace(recordB64)
if recordB64 == "" { // ignore empty records
continue
}
nodeRecord, err := enode.Parse(enode.ValidSchemes, recordB64)
if err != nil {
return fmt.Errorf("bootnode record %d (of %d) is invalid: %q err: %w", i, len(records), recordB64, err)
}
bootnodes = append(bootnodes, nodeRecord)
}
if len(bootnodes) > 0 {
conf.Bootnodes = bootnodes
} else {
conf.Bootnodes = p2p.DefaultBootnodes
}
netRestrict, err := netutil.ParseNetlist(ctx.String(flags.NetRestrict.Name))
if err != nil {
return fmt.Errorf("failed to parse net list: %w", err)
}
conf.NetRestrict = netRestrict
return nil
}
func loadLibp2pOpts(conf *p2p.Config, ctx *cli.Context) error {
addrs := strings.Split(ctx.String(flags.StaticPeers.Name), ",")
for i, addr := range addrs {
addr = strings.TrimSpace(addr)
if addr == "" {
continue // skip empty multi addrs
}
a, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return fmt.Errorf("failed to parse multi addr of static peer %d (out of %d): %q err: %w", i, len(addrs), addr, err)
}
conf.StaticPeers = append(conf.StaticPeers, a)
}
for _, v := range strings.Split(ctx.String(flags.HostMux.Name), ",") {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "yamux":
conf.HostMux = append(conf.HostMux, p2p.YamuxC())
case "mplex":
conf.HostMux = append(conf.HostMux, p2p.MplexC())
default:
return fmt.Errorf("could not recognize mux %s", v)
}
}
secArr := strings.Split(ctx.String(flags.HostSecurity.Name), ",")
for _, v := range secArr {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "none": // no security, for debugging etc.
if len(conf.HostSecurity) > 0 || len(secArr) > 1 {
return errors.New("cannot mix secure transport protocols with no-security")
}
conf.NoTransportSecurity = true
case "noise":
conf.HostSecurity = append(conf.HostSecurity, p2p.NoiseC())
case "tls":
conf.HostSecurity = append(conf.HostSecurity, p2p.TlsC())
default:
return fmt.Errorf("could not recognize security %s", v)
}
}
conf.PeersLo = ctx.Uint(flags.PeersLo.Name)
conf.PeersHi = ctx.Uint(flags.PeersHi.Name)
conf.PeersGrace = ctx.Duration(flags.PeersGrace.Name)
conf.NAT = ctx.Bool(flags.NAT.Name)
conf.UserAgent = ctx.String(flags.UserAgent.Name)
conf.TimeoutNegotiation = ctx.Duration(flags.TimeoutNegotiation.Name)
conf.TimeoutAccept = ctx.Duration(flags.TimeoutAccept.Name)
conf.TimeoutDial = ctx.Duration(flags.TimeoutDial.Name)
peerstorePath := ctx.String(flags.PeerstorePath.Name)
if peerstorePath == "" {
return errors.New("peerstore path must be specified, use 'memory' to explicitly not persist peer records")
}
var err error
var store ds.Batching
if peerstorePath == "memory" {
store = sync.MutexWrap(ds.NewMapDatastore())
} else {
store, err = leveldb.NewDatastore(peerstorePath, nil) // default leveldb options are fine
if err != nil {
return fmt.Errorf("failed to open leveldb db for peerstore: %w", err)
}
}
conf.Store = store
return nil
}
func loadNetworkPrivKey(ctx *cli.Context) (*crypto.Sec
|
{
conf.NoDiscovery = true
}
|
conditional_block
|
load_config.go
|
2p priv key: %w", err)
}
conf.Priv = p
if err := loadListenOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p listen options: %w", err)
}
if err := loadDiscoveryOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p discovery options: %w", err)
}
if err := loadLibp2pOpts(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p options: %w", err)
}
if err := loadGossipOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load p2p gossip options: %w", err)
}
if err := loadScoringParams(conf, ctx, rollupCfg); err != nil {
return nil, fmt.Errorf("failed to load p2p peer scoring options: %w", err)
}
if err := loadBanningOptions(conf, ctx); err != nil {
return nil, fmt.Errorf("failed to load banning option: %w", err)
}
conf.EnableReqRespSync = ctx.Bool(flags.SyncReqRespFlag.Name)
return conf, nil
}
func validatePort(p uint) (uint16, error) {
if p == 0 {
return 0, nil
}
if p >= (1 << 16) {
return 0, fmt.Errorf("port out of range: %d", p)
}
if p < 1024 {
return 0, fmt.Errorf("port is reserved for system: %d", p)
}
return uint16(p), nil
}
|
if scoringLevel == "" {
scoringLevel = ctx.String(flags.PeerScoring.Name)
}
if scoringLevel == "" {
scoringLevel = ctx.String(flags.TopicScoring.Name)
}
if scoringLevel != "" {
params, err := p2p.GetScoringParams(scoringLevel, rollupCfg)
if err != nil {
return err
}
conf.ScoringParams = params
}
return nil
}
// loadBanningOptions loads whether or not to ban peers from the CLI context.
func loadBanningOptions(conf *p2p.Config, ctx *cli.Context) error {
conf.BanningEnabled = ctx.Bool(flags.Banning.Name)
conf.BanningThreshold = ctx.Float64(flags.BanningThreshold.Name)
conf.BanningDuration = ctx.Duration(flags.BanningDuration.Name)
return nil
}
func loadListenOpts(conf *p2p.Config, ctx *cli.Context) error {
listenIP := ctx.String(flags.ListenIP.Name)
if listenIP != "" { // optional
conf.ListenIP = net.ParseIP(listenIP)
if conf.ListenIP == nil {
return fmt.Errorf("failed to parse IP %q", listenIP)
}
}
var err error
conf.ListenTCPPort, err = validatePort(ctx.Uint(flags.ListenTCPPort.Name))
if err != nil {
return fmt.Errorf("bad listen TCP port: %w", err)
}
conf.ListenUDPPort, err = validatePort(ctx.Uint(flags.ListenUDPPort.Name))
if err != nil {
return fmt.Errorf("bad listen UDP port: %w", err)
}
return nil
}
func loadDiscoveryOpts(conf *p2p.Config, ctx *cli.Context) error {
if ctx.Bool(flags.NoDiscovery.Name) {
conf.NoDiscovery = true
}
var err error
conf.AdvertiseTCPPort, err = validatePort(ctx.Uint(flags.AdvertiseTCPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised TCP port: %w", err)
}
conf.AdvertiseUDPPort, err = validatePort(ctx.Uint(flags.AdvertiseUDPPort.Name))
if err != nil {
return fmt.Errorf("bad advertised UDP port: %w", err)
}
adIP := ctx.String(flags.AdvertiseIP.Name)
if adIP != "" { // optional
ips, err := net.LookupIP(adIP)
if err != nil {
return fmt.Errorf("failed to lookup IP of %q to advertise in ENR: %w", adIP, err)
}
// Find the first v4 IP it resolves to
for _, ip := range ips {
if ipv4 := ip.To4(); ipv4 != nil {
conf.AdvertiseIP = ipv4
break
}
}
if conf.AdvertiseIP == nil {
return fmt.Errorf("failed to parse IP %q", adIP)
}
}
dbPath := ctx.String(flags.DiscoveryPath.Name)
if dbPath == "" {
dbPath = "opnode_discovery_db"
}
if dbPath == "memory" {
dbPath = ""
}
conf.DiscoveryDB, err = enode.OpenDB(dbPath)
if err != nil {
return fmt.Errorf("failed to open discovery db: %w", err)
}
bootnodes := make([]*enode.Node, 0)
records := strings.Split(ctx.String(flags.Bootnodes.Name), ",")
for i, recordB64 := range records {
recordB64 = strings.TrimSpace(recordB64)
if recordB64 == "" { // ignore empty records
continue
}
nodeRecord, err := enode.Parse(enode.ValidSchemes, recordB64)
if err != nil {
return fmt.Errorf("bootnode record %d (of %d) is invalid: %q err: %w", i, len(records), recordB64, err)
}
bootnodes = append(bootnodes, nodeRecord)
}
if len(bootnodes) > 0 {
conf.Bootnodes = bootnodes
} else {
conf.Bootnodes = p2p.DefaultBootnodes
}
netRestrict, err := netutil.ParseNetlist(ctx.String(flags.NetRestrict.Name))
if err != nil {
return fmt.Errorf("failed to parse net list: %w", err)
}
conf.NetRestrict = netRestrict
return nil
}
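// Note on the bootnode handling above: the flag value is treated as a comma-separated
// list of ENR strings (e.g. a hypothetical "enr:-IS4...,enr:-IS4..."); blank entries are
// skipped, and p2p.DefaultBootnodes is used when no valid records are supplied.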
func loadLibp2pOpts(conf *p2p.Config, ctx *cli.Context) error {
addrs := strings.Split(ctx.String(flags.StaticPeers.Name), ",")
for i, addr := range addrs {
addr = strings.TrimSpace(addr)
if addr == "" {
continue // skip empty multi addrs
}
a, err := multiaddr.NewMultiaddr(addr)
if err != nil {
return fmt.Errorf("failed to parse multi addr of static peer %d (out of %d): %q err: %w", i, len(addrs), addr, err)
}
conf.StaticPeers = append(conf.StaticPeers, a)
}
for _, v := range strings.Split(ctx.String(flags.HostMux.Name), ",") {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "yamux":
conf.HostMux = append(conf.HostMux, p2p.YamuxC())
case "mplex":
conf.HostMux = append(conf.HostMux, p2p.MplexC())
default:
return fmt.Errorf("could not recognize mux %s", v)
}
}
secArr := strings.Split(ctx.String(flags.HostSecurity.Name), ",")
for _, v := range secArr {
v = strings.ToLower(strings.TrimSpace(v))
switch v {
case "none": // no security, for debugging etc.
if len(conf.HostSecurity) > 0 || len(secArr) > 1 {
return errors.New("cannot mix secure transport protocols with no-security")
}
conf.NoTransportSecurity = true
case "noise":
conf.HostSecurity = append(conf.HostSecurity, p2p.NoiseC())
case "tls":
conf.HostSecurity = append(conf.HostSecurity, p2p.TlsC())
default:
return fmt.Errorf("could not recognize security %s", v)
}
}
conf.PeersLo = ctx.Uint(flags.PeersLo.Name)
conf.PeersHi = ctx.Uint(flags.PeersHi.Name)
conf.PeersGrace = ctx.Duration(flags.PeersGrace.Name)
conf.NAT = ctx.Bool(flags.NAT.Name)
conf.UserAgent = ctx.String(flags.UserAgent.Name)
conf.TimeoutNegotiation = ctx.Duration(flags.TimeoutNegotiation.Name)
conf.TimeoutAccept = ctx.Duration(flags.TimeoutAccept.Name)
conf.TimeoutDial = ctx.Duration(flags.TimeoutDial.Name)
peerstorePath := ctx.String(flags.PeerstorePath.Name)
if peerstorePath == "" {
return errors.New("peerstore path must be specified, use 'memory' to explicitly not persist peer records")
}
var err error
var store ds.Batching
if peerstorePath == "memory" {
store = sync.MutexWrap(ds.NewMapDatastore())
} else {
store, err = leveldb.NewDatastore(peerstorePath, nil) // default leveldb options are fine
if err != nil {
return fmt.Errorf("failed to open leveldb db for peerstore: %w", err)
}
}
conf.Store = store
return nil
}
func loadNetworkPrivKey(ctx *cli.Context) (*crypto.Secp
|
// loadScoringParams loads the peer scoring options from the CLI context.
func loadScoringParams(conf *p2p.Config, ctx *cli.Context, rollupCfg *rollup.Config) error {
scoringLevel := ctx.String(flags.Scoring.Name)
// Check old names for backwards compatibility
|
random_line_split
|
training.py
|
0,
height=20,
max_gens=None,
n_players=None, # players per game
n_games=None, # games per round
n_rounds=None, # number of games each player will play
):
if not root_dir:
now = datetime.datetime.now()
root_dir = now.strftime("training-%Y%m%d-%H%M%S")
mkdir_p(root_dir)
# list of all winners
winners = []
gen_start = 0
winners_path = os.path.join(root_dir, "winners.json")
if os.path.exists(winners_path):
with open(winners_path) as f:
winners = json.load(f)
gen_start = max(w["gen"] for w in winners)
# append to a training log
training_log_path = os.path.join(root_dir, "training.log")
def training_log(msg):
with open(training_log_path, "a") as f:
f.write(msg)
training_log("started at %s\n" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
graph(training_log_path)
# keep a json list of games
game_list_path = os.path.join(root_dir, "games.json")
games = []
if os.path.exists(game_list_path):
with open(game_list_path) as f:
games = json.load(f)
# genetic solver and initial population
solver = genetic.GeneticSolver()
funcs = []
if winners:
funcs = [genetic.FuncWithErr(solver.parsefunc(GenetiSnake.ARITY, w["func"])) for w in winners]
n = n_games * n_players
funcs = funcs[:n]
if n > len(funcs):
funcs += solver.population(GenetiSnake.ARITY, n-len(funcs))
# I need to train to beat worthy adversaries
def extra_snakes():
return [
GenetiSnake(genetic.FuncWithErr(solver.parsefunc(GenetiSnake.ARITY, funcstr)))
for funcstr in (
"(neg var1)",
)]
solver.start(maxgens = max_gens)
solver.gen_count = gen_start
winners = []
time0 = time.clock()
gen0 = solver.gen_count
pool = Pool()
while not solver.converged():
# timing
dt = (time.clock() - time0) * 1000
dgens = float(solver.gen_count - gen0)
if dgens > 0 and dt > 10:
training_log("time check: %s generations in %s sec: %s sec/gen\n" % (dgens, dt, dt/dgens))
time0 = time.clock()
gen0 = solver.gen_count
gen_count = solver.gen_count+1
training_log("start generation=%s rounds=%s snakes=%s\n" % (solver.gen_count, n_rounds, len(funcs)))
# play n_rounds games of n_players randomly shuffled
snakes = {}
for func in funcs:
func.err = 0
func.games = []
snake = GenetiSnake(func)
snake.err = 0
snake.games = []
snake.snake_id = id(snake)
snakes[snake.snake_id] = snake
game_count = 0
game_infos = []
for game_round in range(n_rounds):
# pick groups of n snakes, and add in a few greedy snakes to beat
snake_ids = snakes.keys()
random.shuffle(snake_ids)
start = 0
while start < len(snake_ids):
snake_group = [snakes[snake_id] for snake_id in snake_ids[start : start + n_players]]
start += n_players
# add a couple of greedy snakes to the game for competition
snake_group += extra_snakes()
game_count += 1
game_infos.append(dict(
root_dir = root_dir,
width = width,
height = height,
game_count = game_count,
game_round = game_round,
gen_count = gen_count,
snakes = snake_group,
))
for result in pool.map(play_game, game_infos):
for snake_id, snake_result in result['snakes'].items():
snake = snakes[snake_id]
snake.games.append(snake_result['game'])
snake.turns += snake_result['turns']
snake.err += snake_result['err']
# update list of games
games.append(dict(
path = result['game_json'],
generation = result['gen_count'],
game = result['game_count'],
round = result['game_round'],
turns = result['turn_count'],
func_size = result['func_size'], # game.killed[-1].snake.move_func.func.child_count(),
func = result['func'], # func=str(game.killed[-1].snake.move_func.func),
))
# Evaluate snakes: maximize turns and killed_order
for snake in snakes.values():
func = snake.move_func
func.err = -snake.err
func.turns = snake.turns
func.games = snake.games
assert len(func.games) == n_rounds
# the solver makes the next generation based on func.err
parents, funcs = solver.generation(funcs)
# winners for the generation
training_log("winners generation=%s\n" % (solver.gen_count-1))
winner_func = None
for func in parents:
if hasattr(func, 'turns'):
training_log("turns=%s func=%s\n" % (func.turns, func))
if not winner_func:
winner_func = func
training_log("finish generation=%s\n" % (solver.gen_count-1))
graph(training_log_path)
# keep only the games for the top 100 winners
if winner_func:
winner = dict(
err=winner_func.err,
func=str(winner_func.func),
turns=winner_func.turns,
games=winner_func.games,
gen=solver.gen_count,
)
training_log("game generation=%s winner=%s\n" % (solver.gen_count, winner))
winners.append(winner)
# only keep the last few winners
winners = sorted(winners, key=lambda w: w['err'])[:MAX_WINNERS]
winners = winners[:MAX_WINNERS]
winner_games = set()
for w in winners:
for g in w['games']:
m = RE_GAME.match(g['path'])
assert m
winner_games.add(m.group('basename'))
# delete game files not in winner_games
for path in os.listdir(root_dir):
m = RE_GAME.match(path)
if m and m.group('basename') not in winner_games:
p = os.path.join(root_dir, path)
os.unlink(p)
# delete games not in winner_games
games_keep = []
for game in games:
m = RE_GAME.match(game['path'])
assert m
if m.group('basename') in winner_games:
games_keep.append(game)
games = games_keep
game_list_path_t = game_list_path + '.t'
with open(game_list_path_t, "w") as f:
json.dump(games, f, indent=2)
os.rename(game_list_path_t, game_list_path)
# write winners
winners_path_t = winners_path + ".t"
with open(winners_path_t, "w") as f:
json.dump(winners, f, sort_keys=True, indent=2)
os.rename(winners_path_t, winners_path)
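# Hypothetical invocation of the training entry point evolve(), for reference only;
# the argument values are illustrative assumptions, not taken from this project:
#
#   evolve(root_dir="training-run", width=20, height=20,
#          max_gens=50, n_players=4, n_games=8, n_rounds=3)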
def play_game(game_info):
# set up a game with n_players
game = Game(game_info['width'], game_info['height'])
game_hdr = dict(
snakes=[],
)
gen_count = game_info['gen_count']
game_round = game_info['game_round']
game_count = game_info['game_count']
root_dir = game_info['root_dir']
# write game logs to tmp files
game_name = "gen%03d-game%02d" % (gen_count, game_count)
game_log_path_tmp = os.path.join(root_dir, "%s.log.tmp" % game_name)
game_json_path_tmp = os.path.join(root_dir, "%s.json.tmp" % game_name)
with open(game_log_path_tmp, 'w') as game_log, \
open(game_json_path_tmp, 'w') as game_json :
game_log.write("game start generation=%s round=%s game=%s pid=%s\n" % (gen_count, game_round, game_count, os.getpid()))
for snake in game_info['snakes']:
|
# print the json header and start a list of turns
game_json.write(json.dumps(game_hdr))
game_json.seek(-1, io.SEEK_CUR)
game_json.write(', "turns": [\n')
# play the game
for _board in game.run():
if game.turn_count > 0:
game_json.write(",\n")
game_json.write(json.dumps(game.to_dict()))
# end the game when all but 1 are dead
if len(game.snakes)
|
player = game.add_snake(snake)
game_log.write("game snake: %s func=%s\n" % (player, snake.move_func.func))
game_hdr["snakes"].append(dict(
board_id=player.board_id,
func=str(snake.move_func.func),
))
|
conditional_block
|
training.py
|
0,
height=20,
max_gens=None,
n_players=None, # players per game
n_games=None, # games per round
n_rounds=None, # number of games each player will play
):
if not root_dir:
now = datetime.datetime.now()
root_dir = now.strftime("training-%Y%m%d-%H%M%S")
mkdir_p(root_dir)
# list of all winners
winners = []
gen_start = 0
winners_path = os.path.join(root_dir, "winners.json")
if os.path.exists(winners_path):
with open(winners_path) as f:
winners = json.load(f)
gen_start = max(w["gen"] for w in winners)
# append to a training log
training_log_path = os.path.join(root_dir, "training.log")
def training_log(msg):
with open(training_log_path, "a") as f:
f.write(msg)
training_log("started at %s\n" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
graph(training_log_path)
# keep a json list of games
game_list_path = os.path.join(root_dir, "games.json")
games = []
if os.path.exists(game_list_path):
with open(game_list_path) as f:
games = json.load(f)
# genetic solver and initial population
solver = genetic.GeneticSolver()
funcs = []
if winners:
funcs = [genetic.FuncWithErr(solver.parsefunc(GenetiSnake.ARITY, w["func"])) for w in winners]
n = n_games * n_players
funcs = funcs[:n]
if n > len(funcs):
funcs += solver.population(GenetiSnake.ARITY, n-len(funcs))
# I need to train to beat worthy adversaries
def extra_snakes():
return [
GenetiSnake(genetic.FuncWithErr(solver.parsefunc(GenetiSnake.ARITY, funcstr)))
for funcstr in (
"(neg var1)",
)]
solver.start(maxgens = max_gens)
solver.gen_count = gen_start
winners = []
time0 = time.clock()
gen0 = solver.gen_count
pool = Pool()
while not solver.converged():
# timing
dt = (time.clock() - time0) * 1000
dgens = float(solver.gen_count - gen0)
if dgens > 0 and dt > 10:
training_log("time check: %s generations in %s sec: %s sec/gen\n" % (dgens, dt, dt/dgens))
time0 = time.clock()
gen0 = solver.gen_count
gen_count = solver.gen_count+1
training_log("start generation=%s rounds=%s snakes=%s\n" % (solver.gen_count, n_rounds, len(funcs)))
# play n_rounds games of n_players randomly shuffled
snakes = {}
for func in funcs:
func.err = 0
func.games = []
snake = GenetiSnake(func)
snake.err = 0
snake.games = []
snake.snake_id = id(snake)
snakes[snake.snake_id] = snake
game_count = 0
game_infos = []
for game_round in range(n_rounds):
# pick groups of n snakes, and add in a few greedy snakes to beat
snake_ids = snakes.keys()
random.shuffle(snake_ids)
start = 0
while start < len(snake_ids):
snake_group = [snakes[snake_id] for snake_id in snake_ids[start : start + n_players]]
start += n_players
# add a couple of greedy snakes to the game for competition
snake_group += extra_snakes()
game_count += 1
game_infos.append(dict(
root_dir = root_dir,
|
height = height,
game_count = game_count,
game_round = game_round,
gen_count = gen_count,
snakes = snake_group,
))
for result in pool.map(play_game, game_infos):
for snake_id, snake_result in result['snakes'].items():
snake = snakes[snake_id]
snake.games.append(snake_result['game'])
snake.turns += snake_result['turns']
snake.err += snake_result['err']
# update list of games
games.append(dict(
path = result['game_json'],
generation = result['gen_count'],
game = result['game_count'],
round = result['game_round'],
turns = result['turn_count'],
func_size = result['func_size'], # game.killed[-1].snake.move_func.func.child_count(),
func = result['func'], # func=str(game.killed[-1].snake.move_func.func),
))
# Evaluate snakes: maximize turns and killed_order
for snake in snakes.values():
func = snake.move_func
func.err = -snake.err
func.turns = snake.turns
func.games = snake.games
assert len(func.games) == n_rounds
# the solver makes the next generation based on func.err
parents, funcs = solver.generation(funcs)
# winners for the generation
training_log("winners generation=%s\n" % (solver.gen_count-1))
winner_func = None
for func in parents:
if hasattr(func, 'turns'):
training_log("turns=%s func=%s\n" % (func.turns, func))
if not winner_func:
winner_func = func
training_log("finish generation=%s\n" % (solver.gen_count-1))
graph(training_log_path)
# keep only the games for the top 100 winners
if winner_func:
winner = dict(
err=winner_func.err,
func=str(winner_func.func),
turns=winner_func.turns,
games=winner_func.games,
gen=solver.gen_count,
)
training_log("game generation=%s winner=%s\n" % (solver.gen_count, winner))
winners.append(winner)
# only keep the last few winners
winners = sorted(winners, key=lambda w: w['err'])[:MAX_WINNERS]
winners = winners[:MAX_WINNERS]
winner_games = set()
for w in winners:
for g in w['games']:
m = RE_GAME.match(g['path'])
assert m
winner_games.add(m.group('basename'))
# delete game files not in winner_games
for path in os.listdir(root_dir):
m = RE_GAME.match(path)
if m and m.group('basename') not in winner_games:
p = os.path.join(root_dir, path)
os.unlink(p)
# delete games not in winner_games
games_keep = []
for game in games:
m = RE_GAME.match(game['path'])
assert m
if m.group('basename') in winner_games:
games_keep.append(game)
games = games_keep
game_list_path_t = game_list_path + '.t'
with open(game_list_path_t, "w") as f:
json.dump(games, f, indent=2)
os.rename(game_list_path_t, game_list_path)
# write winners
winners_path_t = winners_path + ".t"
with open(winners_path_t, "w") as f:
json.dump(winners, f, sort_keys=True, indent=2)
os.rename(winners_path_t, winners_path)
def play_game(game_info):
# set up a game with n_players
game = Game(game_info['width'], game_info['height'])
game_hdr = dict(
snakes=[],
)
gen_count = game_info['gen_count']
game_round = game_info['game_round']
game_count = game_info['game_count']
root_dir = game_info['root_dir']
# write game logs to tmp files
game_name = "gen%03d-game%02d" % (gen_count, game_count)
game_log_path_tmp = os.path.join(root_dir, "%s.log.tmp" % game_name)
game_json_path_tmp = os.path.join(root_dir, "%s.json.tmp" % game_name)
with open(game_log_path_tmp, 'w') as game_log, \
open(game_json_path_tmp, 'w') as game_json :
game_log.write("game start generation=%s round=%s game=%s pid=%s\n" % (gen_count, game_round, game_count, os.getpid()))
for snake in game_info['snakes']:
player = game.add_snake(snake)
game_log.write("game snake: %s func=%s\n" % (player, snake.move_func.func))
game_hdr["snakes"].append(dict(
board_id=player.board_id,
func=str(snake.move_func.func),
))
# print the json header and start a list of turns
game_json.write(json.dumps(game_hdr))
game_json.seek(-1, io.SEEK_CUR)
game_json.write(', "turns": [\n')
# play the game
for _board in game.run():
if game.turn_count > 0:
game_json.write(",\n")
game_json.write(json.dumps(game.to_dict()))
# end the game when all but 1 are dead
if len(game.snakes) <=
|
width = width,
|
random_line_split
|
training.py
|
def evolve(
root_dir=None,
width=20,
height=20,
max_gens=None,
n_players=None, # players per game
n_games=None, # games per round
n_rounds=None, # number of games each player will play
):
if not root_dir:
now = datetime.datetime.now()
root_dir = now.strftime("training-%Y%m%d-%H%M%S")
mkdir_p(root_dir)
# list of all winners
winners = []
gen_start = 0
winners_path = os.path.join(root_dir, "winners.json")
if os.path.exists(winners_path):
with open(winners_path) as f:
winners = json.load(f)
gen_start = max(w["gen"] for w in winners)
# append to a training log
training_log_path = os.path.join(root_dir, "training.log")
def training_log(msg):
with open(training_log_path, "a") as f:
f.write(msg)
training_log("started at %s\n" % datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
graph(training_log_path)
# keep a json list of games
game_list_path = os.path.join(root_dir, "games.json")
games = []
if os.path.exists(game_list_path):
with open(game_list_path) as f:
games = json.load(f)
# genetic solver and initial population
solver = genetic.GeneticSolver()
funcs = []
if winners:
funcs = [genetic.FuncWithErr(solver.parsefunc(GenetiSnake.ARITY, w["func"])) for w in winners]
n = n_games * n_players
funcs = funcs[:n]
if n > len(funcs):
funcs += solver.population(GenetiSnake.ARITY, n-len(funcs))
# I need to train to beat worthy adversaries
def extra_snakes():
return [
GenetiSnake(genetic.FuncWithErr(solver.parsefunc(GenetiSnake.ARITY, funcstr)))
for funcstr in (
"(neg var1)",
)]
solver.start(maxgens = max_gens)
solver.gen_count = gen_start
winners = []
time0 = time.clock()
gen0 = solver.gen_count
pool = Pool()
while not solver.converged():
# timing
dt = (time.clock() - time0) * 1000
dgens = float(solver.gen_count - gen0)
if dgens > 0 and dt > 10:
training_log("time check: %s generations in %s sec: %s sec/gen\n" % (dgens, dt, dt/dgens))
time0 = time.clock()
gen0 = solver.gen_count
gen_count = solver.gen_count+1
training_log("start generation=%s rounds=%s snakes=%s\n" % (solver.gen_count, n_rounds, len(funcs)))
# play n_rounds games of n_players randomly shuffled
snakes = {}
for func in funcs:
func.err = 0
func.games = []
snake = GenetiSnake(func)
snake.err = 0
snake.games = []
snake.snake_id = id(snake)
snakes[snake.snake_id] = snake
game_count = 0
game_infos = []
for game_round in range(n_rounds):
# pick groups of n snakes, and add in a few greedy snakes to beat
snake_ids = snakes.keys()
random.shuffle(snake_ids)
start = 0
while start < len(snake_ids):
snake_group = [snakes[snake_id] for snake_id in snake_ids[start : start + n_players]]
start += n_players
# add a couple of greedy snakes to the game for competition
snake_group += extra_snakes()
game_count += 1
game_infos.append(dict(
root_dir = root_dir,
width = width,
height = height,
game_count = game_count,
game_round = game_round,
gen_count = gen_count,
snakes = snake_group,
))
for result in pool.map(play_game, game_infos):
for snake_id, snake_result in result['snakes'].items():
snake = snakes[snake_id]
snake.games.append(snake_result['game'])
snake.turns += snake_result['turns']
snake.err += snake_result['err']
# update list of games
games.append(dict(
path = result['game_json'],
generation = result['gen_count'],
game = result['game_count'],
round = result['game_round'],
turns = result['turn_count'],
func_size = result['func_size'], # game.killed[-1].snake.move_func.func.child_count(),
func = result['func'], # func=str(game.killed[-1].snake.move_func.func),
))
# Evaluate snakes: maximize turns and killed_order
for snake in snakes.values():
func = snake.move_func
func.err = -snake.err
func.turns = snake.turns
func.games = snake.games
assert len(func.games) == n_rounds
# the solver makes the next generation based on func.err
parents, funcs = solver.generation(funcs)
# winners for the generation
training_log("winners generation=%s\n" % (solver.gen_count-1))
winner_func = None
for func in parents:
if hasattr(func, 'turns'):
training_log("turns=%s func=%s\n" % (func.turns, func))
if not winner_func:
winner_func = func
training_log("finish generation=%s\n" % (solver.gen_count-1))
graph(training_log_path)
# keep only the games for the top 100 winners
if winner_func:
winner = dict(
err=winner_func.err,
func=str(winner_func.func),
turns=winner_func.turns,
games=winner_func.games,
gen=solver.gen_count,
)
training_log("game generation=%s winner=%s\n" % (solver.gen_count, winner))
winners.append(winner)
# only keep the last few winners
winners = sorted(winners, key=lambda w: w['err'])[:MAX_WINNERS]
winners = winners[:MAX_WINNERS]
winner_games = set()
for w in winners:
for g in w['games']:
m = RE_GAME.match(g['path'])
assert m
winner_games.add(m.group('basename'))
# delete game files not in winner_games
for path in os.listdir(root_dir):
m = RE_GAME.match(path)
if m and m.group('basename') not in winner_games:
p = os.path.join(root_dir, path)
os.unlink(p)
# delete games not in winner_games
games_keep = []
for game in games:
m = RE_GAME.match(game['path'])
assert m
if m.group('basename') in winner_games:
games_keep.append(game)
games = games_keep
game_list_path_t = game_list_path + '.t'
with open(game_list_path_t, "w") as f:
json.dump(games, f, indent=2)
os.rename(game_list_path_t, game_list_path)
# write winners
winners_path_t = winners_path + ".t"
with open(winners_path_t, "w") as f:
json.dump(winners, f, sort_keys=True, indent=2)
os.rename(winners_path_t, winners_path)
def play_game(game_info):
# set up a game with n_players
game = Game(game_info['width'], game_info['height'])
game_hdr = dict(
snakes=[],
)
gen_count = game_info['gen_count']
game_round = game_info['game_round']
game_count = game_info['game_count']
root_dir = game_info['root_dir']
# write game logs to tmp files
game_name = "gen%03d-game%02d" % (gen_count, game_count)
game_log_path_tmp = os.path.join(root_dir, "%s.log.tmp" % game_name)
game_json_path_tmp = os.path.join(root_dir, "%s.json.tmp" % game_name)
with open(game_log_path_tmp, 'w') as game_log, \
open(game_json_path_tmp, 'w') as game_json :
game_log.write("game start generation=%s round=%s game=%s pid=%s\n" % (gen_count, game_round, game_count, os.getpid()))
for snake in game_info['snakes']:
player =
|
errs = []
turns = []
for line in fileinput.input(training_log_path):
m = RE_LOG_WINNER.match(line)
if m:
winner = m.group('winner')
try:
w = eval(winner) # pylint: disable=eval-used
errs.append(-w['err'])
turns.append(w['turns'])
except Exception as e:
LOG.warn("couldn't parse winner=%s: e=%s", winner, e)
continue
# update the progress graph
plt.plot(errs)
plt.plot(turns)
plt.ylabel('Turns / Err')
plt.xlabel('Generation')
plt.savefig(os.path.join(os.path.dirname(training_log_path), 'turns.svg'))
|
identifier_body
|
|
training.py
|
1000
dgens = float(solver.gen_count - gen0)
if dgens > 0 and dt > 10:
training_log("time check: %s generations in %s sec: %s sec/gen\n" % (dgens, dt, dt/dgens))
time0 = time.clock()
gen0 = solver.gen_count
gen_count = solver.gen_count+1
training_log("start generation=%s rounds=%s snakes=%s\n" % (solver.gen_count, n_rounds, len(funcs)))
# play n_rounds games of n_players randomly shuffled
snakes = {}
for func in funcs:
func.err = 0
func.games = []
snake = GenetiSnake(func)
snake.err = 0
snake.games = []
snake.snake_id = id(snake)
snakes[snake.snake_id] = snake
game_count = 0
game_infos = []
for game_round in range(n_rounds):
# pick groups of n snakes, and add in a few greedy snakes to beat
snake_ids = snakes.keys()
random.shuffle(snake_ids)
start = 0
while start < len(snake_ids):
snake_group = [snakes[snake_id] for snake_id in snake_ids[start : start + n_players]]
start += n_players
# add a couple of greedy snakes to the game for competition
snake_group += extra_snakes()
game_count += 1
game_infos.append(dict(
root_dir = root_dir,
width = width,
height = height,
game_count = game_count,
game_round = game_round,
gen_count = gen_count,
snakes = snake_group,
))
for result in pool.map(play_game, game_infos):
for snake_id, snake_result in result['snakes'].items():
snake = snakes[snake_id]
snake.games.append(snake_result['game'])
snake.turns += snake_result['turns']
snake.err += snake_result['err']
# update list of games
games.append(dict(
path = result['game_json'],
generation = result['gen_count'],
game = result['game_count'],
round = result['game_round'],
turns = result['turn_count'],
func_size = result['func_size'], # game.killed[-1].snake.move_func.func.child_count(),
func = result['func'], # func=str(game.killed[-1].snake.move_func.func),
))
# Evaluate snakes: maximize turns and killed_order
for snake in snakes.values():
func = snake.move_func
func.err = -snake.err
func.turns = snake.turns
func.games = snake.games
assert len(func.games) == n_rounds
# the solver makes the next generation based on func.err
parents, funcs = solver.generation(funcs)
# winners for the generation
training_log("winners generation=%s\n" % (solver.gen_count-1))
winner_func = None
for func in parents:
if hasattr(func, 'turns'):
training_log("turns=%s func=%s\n" % (func.turns, func))
if not winner_func:
winner_func = func
training_log("finish generation=%s\n" % (solver.gen_count-1))
graph(training_log_path)
# keep only the games for the top 100 winners
if winner_func:
winner = dict(
err=winner_func.err,
func=str(winner_func.func),
turns=winner_func.turns,
games=winner_func.games,
gen=solver.gen_count,
)
training_log("game generation=%s winner=%s\n" % (solver.gen_count, winner))
winners.append(winner)
# only keep the last few winners
winners = sorted(winners, key=lambda w: w['err'])[:MAX_WINNERS]
winners = winners[:MAX_WINNERS]
winner_games = set()
for w in winners:
for g in w['games']:
m = RE_GAME.match(g['path'])
assert m
winner_games.add(m.group('basename'))
# delete game files not in winner_games
for path in os.listdir(root_dir):
m = RE_GAME.match(path)
if m and m.group('basename') not in winner_games:
p = os.path.join(root_dir, path)
os.unlink(p)
# delete games not in winner_games
games_keep = []
for game in games:
m = RE_GAME.match(game['path'])
assert m
if m.group('basename') in winner_games:
games_keep.append(game)
games = games_keep
game_list_path_t = game_list_path + '.t'
with open(game_list_path_t, "w") as f:
json.dump(games, f, indent=2)
os.rename(game_list_path_t, game_list_path)
# write winners
winners_path_t = winners_path + ".t"
with open(winners_path_t, "w") as f:
json.dump(winners, f, sort_keys=True, indent=2)
os.rename(winners_path_t, winners_path)
def play_game(game_info):
# set up a game with n_players
game = Game(game_info['width'], game_info['height'])
game_hdr = dict(
snakes=[],
)
gen_count = game_info['gen_count']
game_round = game_info['game_round']
game_count = game_info['game_count']
root_dir = game_info['root_dir']
# write game logs to tmp files
game_name = "gen%03d-game%02d" % (gen_count, game_count)
game_log_path_tmp = os.path.join(root_dir, "%s.log.tmp" % game_name)
game_json_path_tmp = os.path.join(root_dir, "%s.json.tmp" % game_name)
with open(game_log_path_tmp, 'w') as game_log, \
open(game_json_path_tmp, 'w') as game_json :
game_log.write("game start generation=%s round=%s game=%s pid=%s\n" % (gen_count, game_round, game_count, os.getpid()))
for snake in game_info['snakes']:
player = game.add_snake(snake)
game_log.write("game snake: %s func=%s\n" % (player, snake.move_func.func))
game_hdr["snakes"].append(dict(
board_id=player.board_id,
func=str(snake.move_func.func),
))
# print the json header and start a list of turns
game_json.write(json.dumps(game_hdr))
game_json.seek(-1, io.SEEK_CUR)
game_json.write(', "turns": [\n')
# play the game
for _board in game.run():
if game.turn_count > 0:
game_json.write(",\n")
game_json.write(json.dumps(game.to_dict()))
# end the game when all but 1 are dead
if len(game.snakes) <= 1:
break
game.killed += game.snakes
game_log.write("game winners generation=%s round=%s game=%s\n" % (gen_count, game_round, game_count))
for player in sorted(game.killed, key=lambda p: p.turns, reverse=True):
game_log.write("snake: %s func=%s\n" % (player, player.snake.move_func.func))
game_log.write("game finished generation=%s round=%s game=%s\n" % (gen_count, game_round, game_count))
game_json.write("\n]}\n")
# move the tmp logs to their permanent names
game_name = "gen%03d-game%02d-turns%05d" % (gen_count, game_count, game.turn_count)
game_log_path = os.path.join(root_dir, "%s.log" % game_name)
assert RE_GAME.match(os.path.basename(game_log_path))
os.rename(game_log_path_tmp, game_log_path)
game_json_path_rel = "%s.json" % game_name
game_json_path = os.path.join(root_dir, game_json_path_rel)
assert RE_GAME.match(os.path.basename(game_json_path))
os.rename(game_json_path_tmp, game_json_path)
# sum up the turns each player lasted
result_snakes = {}
for killed_order, player in enumerate(game.killed):
snake_id = getattr(player.snake, 'snake_id', None)
if not snake_id:
continue
result_snakes[snake_id] = dict(
game = dict(
game_turns = game.turn_count,
player_turns = player.turns,
killed_order = killed_order+1,
path=os.path.basename(game_json_path),
),
turns = player.turns,
err = killed_order * killed_order * player.turns if player.health else 0
)
# return a map of snake_id => game stats
result = dict(
game_json = game_json_path_rel,
gen_count = gen_count,
game_count = game_count,
game_round = game_round,
turn_count = game.turn_count,
func_size = game.killed[-1].snake.move_func.func.child_count(),
func = str(game.killed[-1].snake.move_func.func),
snakes = result_snakes,
)
return result
def
|
mkdir_p
|
identifier_name
|
|
final.py
|
t = int(alpha*P1[0] + (1 - alpha)*P2[0])
s = int(alpha*P2[0] + (1 - alpha)*P1[0])
if (t >= 0 and t <= 255 and s >= 0 and s <= 255):
C1[0] = t
C2[0] = s
if tx_crossover > rand_t:
#a
t = int(alpha*P1[1] + (1 - alpha)*P2[1])
s = int(alpha*P2[1] + (1 - alpha)*P1[1])
if (t >= 0 and t <= 130 and s >= 0 and s <= 130):
C1[1] = t
C2[1] = s
return (C1, C2)
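# Editor's note: crossover() above is an arithmetic (blend) crossover: each
# child gene is the convex combination alpha*p1 + (1 - alpha)*p2, accepted only
# if it stays in range (0-255 for threshold, 0-130 for contrast). A worked
# example with illustrative values:
alpha_demo = 0.25
P1_demo, P2_demo = [200, 40], [100, 80]
c1_gene = int(alpha_demo * P1_demo[0] + (1 - alpha_demo) * P2_demo[0])  # -> 125
c2_gene = int(alpha_demo * P2_demo[0] + (1 - alpha_demo) * P1_demo[0])  # -> 175
# both children stay inside [0, 255], so the blended genes are kept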
# Selection method - roulette wheel
def selecao_roleta(Fx, df):
posicao = 0
soma_acumulada = np.cumsum(Fx)
#tamanho = len(Fx)
#limite = soma_acumulada[tamanho-1]
#divisao = np.random.randint(1,9)
rand = np.random.uniform(0, 1)
for i, valor in enumerate(soma_acumulada):
if rand <= valor:
posicao = i
break
return df.loc[posicao]
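# Editor's note: selecao_roleta() draws one row with probability proportional
# to its share of the total fitness (Fx is expected to sum to 1, as produced by
# aptidao_proporcional). A standalone sketch of the same roulette-wheel idea on
# toy fitness values:
import numpy as np

def roulette_pick(fitness):
    probs = np.asarray(fitness, dtype=float)
    probs = probs / probs.sum()        # normalize to a probability distribution
    cumulative = np.cumsum(probs)      # wheel segments laid end to end
    r = np.random.uniform(0, 1)        # spin the wheel
    return int(np.searchsorted(cumulative, r))

# roulette_pick([5, 1, 4]) returns index 0 about half of the time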
def plot_geracao_3d(df, geracao, img):
populacao = df.copy()
if geracao%5 == 0:
print("Geracao: ", geracao)
fig = plt.figure(figsize= (16,9))
ax = plt.axes(projection = '3d')
ax.grid(b = True, color = 'grey', linestyle = '-.', linewidth = 0.3, alpha = 0.2)
my_cmap = plt.get_cmap('hsv')
sctt = ax.scatter3D(populacao['treshold'],populacao['contrast'], populacao["fx"], alpha = 0.8, c=populacao["fx"], cmap= my_cmap, marker = '.')
plt.title(f"Geracao {geracao}")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('f(x1,x2)')
fig.colorbar(sctt, ax=ax, shrink= 0.5, aspect = 5)
plt.savefig(f'Resultados/Imagens/{geracao}-{img}.png')
plt.clf()
plt.close()
def plot_evolucao_temporal(melhores, piores, medias):
x = [i for i in range(0,len(melhores))]
y_melhor = []
y_pior = []
y_media = []
for i in range(len(melhores)):
y_melhor.append(f_alpine02(melhores[i]))
y_media.append(f_alpine02(medias[i]))
y_pior.append(f_alpine02(piores[i]))
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x,y_melhor,'b-.', label = "Melhor")
plt.plot(x,y_media, 'g-.', label = "Media")
plt.plot(x,y_pior, 'r-.', label = "Pior")
plt.title("Evolucao temporal do algoritmo")
ax.set_xlabel('Geracao')
ax.set_ylabel('f(x1,x2)')
plt.legend()
plt.show()
plt.clf()
plt.close()
def plot_variaveis_populacao(melhores, piores, medias):
x1_melhores = []
x2_melhores = []
x1_medias = []
x2_medias = []
x1_piores = []
x2_piores = []
for i in range(len(melhores)):
x1_melhores.append(melhores[i]['X1'])
x2_melhores.append(melhores[i]['X2'])
x1_medias.append(medias[i]['X1'])
x2_medias.append(medias[i]['X2'])
x1_piores.append(piores[i]['X1'])
x2_piores.append(piores[i]['X2'])
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x1_melhores,x2_melhores,'b-.', label = "Melhor")
plt.plot(x1_medias,x2_medias, 'g-.', label = "Media")
plt.plot(x1_piores,x2_piores, 'r-.', label = "Pior")
plt.title("Evolucao dos atributos da populacao")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
plt.legend()
plt.show()
plt.clf()
plt.close()
def populacao_media(populacao):
return populacao.mean(axis = 0)
def genetic_algorithm(tamanho_pop, tx_mutacao, tx_crossover, total_geracoes, image_path, image):
start = timer()
melhor_geracao = 0
# Create the dataframe and fill it with random values
populacao = pd.DataFrame()
populacao['treshold'] = np.random.randint(0, 255, size=(tamanho_pop))
populacao['contrast'] = np.random.randint(0, 100, size=(tamanho_pop))
#print(populacao)
# best individual
best = []
pior = []
melhor = []
media = []
for geracao in tqdm(range(total_geracoes)) :
populacao['fx'] = funcao_objetivo(populacao,image_path)
# sort
populacao_ordenada = populacao.sort_values('fx', axis= 0)
# Take the best individual of the current generation
if geracao == 0:
best = populacao_ordenada.loc[0]
elif best['fx'] < populacao_ordenada.loc[0]['fx']:
best = populacao_ordenada.loc[0]
melhor_geracao = geracao
melhor.append(populacao_ordenada.loc[0])
pior.append(populacao_ordenada.loc[tamanho_pop-1])
media.append(populacao_media(populacao_ordenada.copy()))
Fx = aptidao_proporcional(populacao)
nova_populacao = pd.DataFrame(columns = ['treshold', 'contrast'])
for i in range(0, int(tamanho_pop/2)):
# call selection
P1 = selecao_roleta(Fx, populacao)
# so that individuals are not repeated
while(True):
P2 = selecao_roleta(Fx, populacao)
if( P2['treshold'] != P1['treshold'] or P2['contrast'] != P1['contrast']):
break
# convert P1 and P2 to arrays
P1 = P1.to_numpy()
P2 = P2.to_numpy()
# perform crossover
C1, C2 = crossover(P1, P2, tx_crossover)
# Apply mutation to C1 and C2
C1 = mutacao_uniforme(C1, tx_mutacao)
C2 = mutacao_uniforme(C2, tx_mutacao)
nova_populacao=nova_populacao.append(pd.DataFrame(data=[C1, C2], columns = ['treshold', 'contrast'] ))
plot_geracao_3d(populacao,geracao,image)
populacao = nova_populacao.reset_index(drop=True)
n_neuronios = getCount(cv.imread(image_path),best["treshold"], best["contrast"])
end = timer()
tempo = (timedelta(seconds=end-start))
print("==========================================")
plot_geracao_3d(populacao,geracao, image)
#plot_evolucao_temporal(melhor,media,pior)
print("Melhor individuo individuo: ")
print("Treshold:", best['treshold'])
print("Contrast: ",best['contrast'])
print("Numero de neuronios: ", n_neuronios)
print("Geracao: ",melhor_geracao)
return (melhor_geracao, best['treshold'], best['contrast'], n_neuronios, tempo)
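# Editor's note: a hedged usage sketch for genetic_algorithm(); the population
# size, rates, generation count and image path below are placeholders for
# illustration, not values used in the original experiments.
# best_gen, best_threshold, best_contrast, n_cells, elapsed = genetic_algorithm(
#     tamanho_pop=20, tx_mutacao=0.05, tx_crossover=0.8,
#     total_geracoes=50, image_path="data/sample.png", image="sample")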
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
blurred = cv.GaussianBlur(image, kernel_size, sigma)
sharpened = float(amount + 1) * image - float(amount) * blurred
sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
sharpened = sharpened.round().astype(np.uint8)
if threshold > 0:
low
|
_contrast_mask = np.absolute(image - blurred) < threshold
np.copyto(sharpened, image, where=low_contrast_mask)
|
conditional_block
|
|
final.py
|
cv.threshold(dist_transform, 0.3*dist_transform.max(),255,0)
last_image2 = np.uint8(last_image)
cnts = cv.findContours(last_image2.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
return len(cnts)
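# Editor's note: the fragment above (apparently the tail of getCount) counts
# cells by thresholding a distance transform, converting it to uint8 and
# counting the external contours. A minimal standalone sketch of that counting
# step; the input is assumed to be a binary uint8 mask:
import cv2 as cv
import imutils

def count_blobs(binary_mask):
    cnts = cv.findContours(binary_mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)  # normalizes the return value across OpenCV versions
    return len(cnts)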
def check_neuron(path,treshold, contrast):
for str_file in os.listdir(path):
filename = f"{path}/{str_file}"
image = cv.imread(filename)
count = getCount(image,treshold, contrast)
print(count)
def funcao_objetivo(populacao,filename):
arr = []
image = cv.imread(filename)
for i in tqdm(range(len(populacao))):
count = getCount(image,populacao.loc[i]["treshold"], populacao.loc[i]["contrast"])
arr.append(count)
return arr
def aptidao_proporcional(df):
Fx = []
sum_fx = sum(df['fx'])
for i, row in df.iterrows():
Fx.append(row['fx']/sum_fx)
return Fx
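# Editor's note: aptidao_proporcional() simply normalizes the raw objective
# values so they sum to 1, giving each individual a selection probability
# proportional to its fitness. An equivalent vectorized one-liner (sketch):
# Fx = (df['fx'] / df['fx'].sum()).tolist()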
# Mutation
def mutacao_uniforme(individuo, tx_mutacao):
individuo_mutado = []
# since the mutation rate is between 0 and 1:
rand_treshrold = np.random.random()
if rand_treshrold < tx_mutacao:
individuo_mutado.append(np.random.randint(0, 255))
else:
individuo_mutado.append(individuo[0])
rand_contrast = np.random.random()
if rand_contrast < tx_mutacao:
individuo_mutado.append(np.random.randint(0, 130))
else:
individuo_mutado.append(individuo[1])
return individuo_mutado
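# Editor's note: mutacao_uniforme() resamples each gene independently with
# probability tx_mutacao (threshold drawn from [0, 255), contrast from
# [0, 130)); otherwise the gene is copied unchanged. Hedged usage sketch on an
# illustrative child:
# mutated = mutacao_uniforme([120, 45], tx_mutacao=0.1)
# # each gene keeps its original value with probability 0.9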
def crossover(P1, P2, tx_crossover):
C1 = P1
C2 = P2
alpha = np.random.random()
rand_c = np.random.random()
rand_t = np.random.random()
if tx_crossover > rand_c:
#a
t = int(alpha*P1[0] + (1 - alpha)*P2[0])
s = int(alpha*P2[0] + (1 - alpha)*P1[0])
if (t >= 0 and t <= 255 and s >= 0 and s <= 255):
C1[0] = t
C2[0] = s
if tx_crossover > rand_t:
#a
t = int(alpha*P1[1] + (1 - alpha)*P2[1])
s = int(alpha*P2[1] + (1 - alpha)*P1[1])
if (t >= 0 and t <= 130 and s >= 0 and s <= 130):
C1[1] = t
C2[1] = s
return (C1, C2)
# Selection method - roulette wheel
def selecao_roleta(Fx, df):
posicao = 0
soma_acumulada = np.cumsum(Fx)
#tamanho = len(Fx)
#limite = soma_acumulada[tamanho-1]
#divisao = np.random.randint(1,9)
rand = np.random.uniform(0, 1)
for i, valor in enumerate(soma_acumulada):
if rand <= valor:
posicao = i
break
return df.loc[posicao]
def plot_geracao_3d(df, geracao, img):
populacao = df.copy()
if geracao%5 == 0:
print("Geracao: ", geracao)
fig = plt.figure(figsize= (16,9))
ax = plt.axes(projection = '3d')
ax.grid(b = True, color = 'grey', linestyle = '-.', linewidth = 0.3, alpha = 0.2)
my_cmap = plt.get_cmap('hsv')
sctt = ax.scatter3D(populacao['treshold'],populacao['contrast'], populacao["fx"], alpha = 0.8, c=populacao["fx"], cmap= my_cmap, marker = '.')
plt.title(f"Geracao {geracao}")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('f(x1,x2)')
fig.colorbar(sctt, ax=ax, shrink= 0.5, aspect = 5)
plt.savefig(f'Resultados/Imagens/{geracao}-{img}.png')
plt.clf()
plt.close()
def
|
(melhores, piores, medias):
x = [i for i in range(0,len(melhores))]
y_melhor = []
y_pior = []
y_media = []
for i in range(len(melhores)):
y_melhor.append(f_alpine02(melhores[i]))
y_media.append(f_alpine02(medias[i]))
y_pior.append(f_alpine02(piores[i]))
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x,y_melhor,'b-.', label = "Melhor")
plt.plot(x,y_media, 'g-.', label = "Media")
plt.plot(x,y_pior, 'r-.', label = "Pior")
plt.title("Evolucao temporal do algoritmo")
ax.set_xlabel('Geracao')
ax.set_ylabel('f(x1,x2)')
plt.legend()
plt.show()
plt.clf()
plt.close()
def plot_variaveis_populacao(melhores, piores, medias):
x1_melhores = []
x2_melhores = []
x1_medias = []
x2_medias = []
x1_piores = []
x2_piores = []
for i in range(len(melhores)):
x1_melhores.append(melhores[i]['X1'])
x2_melhores.append(melhores[i]['X2'])
x1_medias.append(medias[i]['X1'])
x2_medias.append(medias[i]['X2'])
x1_piores.append(piores[i]['X1'])
x2_piores.append(piores[i]['X2'])
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x1_melhores,x2_melhores,'b-.', label = "Melhor")
plt.plot(x1_medias,x2_medias, 'g-.', label = "Media")
plt.plot(x1_piores,x2_piores, 'r-.', label = "Pior")
plt.title("Evolucao dos atributos da populacao")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
plt.legend()
plt.show()
plt.clf()
plt.close()
def populacao_media(populacao):
return populacao.mean(axis = 0)
def genetic_algorithm(tamanho_pop, tx_mutacao, tx_crossover, total_geracoes, image_path, image):
start = timer()
melhor_geracao = 0
# Create the dataframe and fill it with random values
populacao = pd.DataFrame()
populacao['treshold'] = np.random.randint(0, 255, size=(tamanho_pop))
populacao['contrast'] = np.random.randint(0, 100, size=(tamanho_pop))
#print(populacao)
# best individual
best = []
pior = []
melhor = []
media = []
for geracao in tqdm(range(total_geracoes)) :
populacao['fx'] = funcao_objetivo(populacao,image_path)
# sort
populacao_ordenada = populacao.sort_values('fx', axis= 0)
# Take the best individual of the current generation
if geracao == 0:
best = populacao_ordenada.loc[0]
elif best['fx'] < populacao_ordenada.loc[0]['fx']:
best = populacao_ordenada.loc[0]
melhor_geracao = geracao
melhor.append(populacao_ordenada.loc[0])
pior.append(populacao_ordenada.loc[tamanho_pop-1])
media.append(populacao_media(populacao_ordenada.copy()))
Fx = aptidao_proporcional(populacao)
nova_populacao = pd.DataFrame(columns = ['treshold', 'contrast'])
for i in range(0, int(tamanho_pop/2)):
# call selection
P1 = selecao_roleta(Fx, populacao)
# so that individuals are not repeated
while(True):
P2 = selecao_roleta(Fx, populacao)
if( P2['treshold'] != P1['treshold'] or P2['contrast'] != P1['contrast']):
break
# convert P1 and P2 to arrays
P1 = P1.to
|
plot_evolucao_temporal
|
identifier_name
|
final.py
|
[1])
s = int(alpha*P2[1] + (1 - alpha)*P1[1])
if (t >= 0 and t <= 130 and s >= 0 and s <= 130):
C1[1] = t
C2[1] = s
return (C1, C2)
# Selection method - roulette wheel
def selecao_roleta(Fx, df):
posicao = 0
soma_acumulada = np.cumsum(Fx)
#tamanho = len(Fx)
#limite = soma_acumulada[tamanho-1]
#divisao = np.random.randint(1,9)
rand = np.random.uniform(0, 1)
for i, valor in enumerate(soma_acumulada):
if rand <= valor:
posicao = i
break
return df.loc[posicao]
def plot_geracao_3d(df, geracao, img):
populacao = df.copy()
if geracao%5 == 0:
print("Geracao: ", geracao)
fig = plt.figure(figsize= (16,9))
ax = plt.axes(projection = '3d')
ax.grid(b = True, color = 'grey', linestyle = '-.', linewidth = 0.3, alpha = 0.2)
my_cmap = plt.get_cmap('hsv')
sctt = ax.scatter3D(populacao['treshold'],populacao['contrast'], populacao["fx"], alpha = 0.8, c=populacao["fx"], cmap= my_cmap, marker = '.')
plt.title(f"Geracao {geracao}")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('f(x1,x2)')
fig.colorbar(sctt, ax=ax, shrink= 0.5, aspect = 5)
plt.savefig(f'Resultados/Imagens/{geracao}-{img}.png')
plt.clf()
plt.close()
def plot_evolucao_temporal(melhores, piores, medias):
x = [i for i in range(0,len(melhores))]
y_melhor = []
y_pior = []
y_media = []
for i in range(len(melhores)):
y_melhor.append(f_alpine02(melhores[i]))
y_media.append(f_alpine02(medias[i]))
y_pior.append(f_alpine02(piores[i]))
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x,y_melhor,'b-.', label = "Melhor")
plt.plot(x,y_media, 'g-.', label = "Media")
plt.plot(x,y_pior, 'r-.', label = "Pior")
plt.title("Evolucao temporal do algoritmo")
ax.set_xlabel('Geracao')
ax.set_ylabel('f(x1,x2)')
plt.legend()
plt.show()
plt.clf()
plt.close()
def plot_variaveis_populacao(melhores, piores, medias):
x1_melhores = []
x2_melhores = []
x1_medias = []
x2_medias = []
x1_piores = []
x2_piores = []
for i in range(len(melhores)):
x1_melhores.append(melhores[i]['X1'])
x2_melhores.append(melhores[i]['X2'])
x1_medias.append(medias[i]['X1'])
x2_medias.append(medias[i]['X2'])
x1_piores.append(piores[i]['X1'])
x2_piores.append(piores[i]['X2'])
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x1_melhores,x2_melhores,'b-.', label = "Melhor")
plt.plot(x1_medias,x2_medias, 'g-.', label = "Media")
plt.plot(x1_piores,x2_piores, 'r-.', label = "Pior")
plt.title("Evolucao dos atributos da populacao")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
plt.legend()
plt.show()
plt.clf()
plt.close()
def populacao_media(populacao):
return populacao.mean(axis = 0)
def genetic_algorithm(tamanho_pop, tx_mutacao, tx_crossover, total_geracoes, image_path, image):
start = timer()
melhor_geracao = 0
# Create the dataframe and fill it with random values
populacao = pd.DataFrame()
populacao['treshold'] = np.random.randint(0, 255, size=(tamanho_pop))
populacao['contrast'] = np.random.randint(0, 100, size=(tamanho_pop))
#print(populacao)
# best individual
best = []
pior = []
melhor = []
media = []
for geracao in tqdm(range(total_geracoes)) :
populacao['fx'] = funcao_objetivo(populacao,image_path)
# sort
populacao_ordenada = populacao.sort_values('fx', axis= 0)
# Take the best individual of the current generation
if geracao == 0:
best = populacao_ordenada.loc[0]
elif best['fx'] < populacao_ordenada.loc[0]['fx']:
best = populacao_ordenada.loc[0]
melhor_geracao = geracao
melhor.append(populacao_ordenada.loc[0])
pior.append(populacao_ordenada.loc[tamanho_pop-1])
media.append(populacao_media(populacao_ordenada.copy()))
Fx = aptidao_proporcional(populacao)
nova_populacao = pd.DataFrame(columns = ['treshold', 'contrast'])
for i in range(0, int(tamanho_pop/2)):
# call selection
P1 = selecao_roleta(Fx, populacao)
# so that individuals are not repeated
while(True):
P2 = selecao_roleta(Fx, populacao)
if( P2['treshold'] != P1['treshold'] or P2['contrast'] != P1['contrast']):
break
# convert P1 and P2 to arrays
P1 = P1.to_numpy()
P2 = P2.to_numpy()
# perform crossover
C1, C2 = crossover(P1, P2, tx_crossover)
# Apply mutation to C1 and C2
C1 = mutacao_uniforme(C1, tx_mutacao)
C2 = mutacao_uniforme(C2, tx_mutacao)
nova_populacao=nova_populacao.append(pd.DataFrame(data=[C1, C2], columns = ['treshold', 'contrast'] ))
plot_geracao_3d(populacao,geracao,image)
populacao = nova_populacao.reset_index(drop=True)
n_neuronios = getCount(cv.imread(image_path),best["treshold"], best["contrast"])
end = timer()
tempo = (timedelta(seconds=end-start))
print("==========================================")
plot_geracao_3d(populacao,geracao, image)
#plot_evolucao_temporal(melhor,media,pior)
print("Melhor individuo individuo: ")
print("Treshold:", best['treshold'])
print("Contrast: ",best['contrast'])
print("Numero de neuronios: ", n_neuronios)
print("Geracao: ",melhor_geracao)
return (melhor_geracao, best['treshold'], best['contrast'], n_neuronios, tempo)
def unsharp_mask(image, kernel_size=(5, 5), sigma=1.0, amount=1.0, threshold=0):
blurred = cv.GaussianBlur(image, kernel_size, sigma)
sharpened = float(amount + 1) * image - float(amount) * blurred
sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
sharpened = sharpened.round().astype(np.uint8)
if threshold > 0:
low_contrast_mask = np.absolute(image - blurred) < threshold
np.copyto(sharpened, image, where=low_contrast_mask)
#cv.imshow("Display window", sharpened)
return sharpened
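# Editor's note: unsharp_mask() sharpens by adding back the difference between
# the image and its Gaussian blur; pixels whose local contrast is below
# `threshold` are copied from the original. Hedged usage sketch (the file path
# is a placeholder):
# import cv2 as cv
# img = cv.imread("data/sample.png")
# sharp = unsharp_mask(img, kernel_size=(5, 5), sigma=1.0, amount=1.5, threshold=2)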
def getCount(image,treshold,contrast):
f = 131*(contrast + 127)/(127*(131-contrast))
alpha_c = f
gamma_c = 127*(1-f)
|
new_image = cv.addWeighted( image, alpha_c, image, 0, gamma_c) #add contrast for image
image = unsharp_mask(new_image)
image_blur_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
|
random_line_split
|
|
final.py
|
cv.threshold(dist_transform, 0.3*dist_transform.max(),255,0)
last_image2 = np.uint8(last_image)
cnts = cv.findContours(last_image2.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
return len(cnts)
def check_neuron(path,treshold, contrast):
for str_file in os.listdir(path):
filename = f"{path}/{str_file}"
image = cv.imread(filename)
count = getCount(image,treshold, contrast)
print(count)
def funcao_objetivo(populacao,filename):
arr = []
image = cv.imread(filename)
for i in tqdm(range(len(populacao))):
count = getCount(image,populacao.loc[i]["treshold"], populacao.loc[i]["contrast"])
arr.append(count)
return arr
def aptidao_proporcional(df):
Fx = []
sum_fx = sum(df['fx'])
for i, row in df.iterrows():
Fx.append(row['fx']/sum_fx)
return Fx
# Mutation
def mutacao_uniforme(individuo, tx_mutacao):
|
def crossover(P1, P2, tx_crossover):
C1 = P1
C2 = P2
alpha = np.random.random()
rand_c = np.random.random()
rand_t = np.random.random()
if tx_crossover > rand_c:
#a
t = int(alpha*P1[0] + (1 - alpha)*P2[0])
s = int(alpha*P2[0] + (1 - alpha)*P1[0])
if (t >= 0 and t <= 255 and s >= 0 and s <= 255):
C1[0] = t
C2[0] = s
if tx_crossover > rand_t:
#a
t = int(alpha*P1[1] + (1 - alpha)*P2[1])
s = int(alpha*P2[1] + (1 - alpha)*P1[1])
if (t >= 0 and t <= 130 and s >= 0 and s <= 130):
C1[1] = t
C2[1] = s
return (C1, C2)
# Selection method - roulette wheel
def selecao_roleta(Fx, df):
posicao = 0
soma_acumulada = np.cumsum(Fx)
#tamanho = len(Fx)
#limite = soma_acumulada[tamanho-1]
#divisao = np.random.randint(1,9)
rand = np.random.uniform(0, 1)
for i, valor in enumerate(soma_acumulada):
if rand <= valor:
posicao = i
break
return df.loc[posicao]
def plot_geracao_3d(df, geracao, img):
populacao = df.copy()
if geracao%5 == 0:
print("Geracao: ", geracao)
fig = plt.figure(figsize= (16,9))
ax = plt.axes(projection = '3d')
ax.grid(b = True, color = 'grey', linestyle = '-.', linewidth = 0.3, alpha = 0.2)
my_cmap = plt.get_cmap('hsv')
sctt = ax.scatter3D(populacao['treshold'],populacao['contrast'], populacao["fx"], alpha = 0.8, c=populacao["fx"], cmap= my_cmap, marker = '.')
plt.title(f"Geracao {geracao}")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('f(x1,x2)')
fig.colorbar(sctt, ax=ax, shrink= 0.5, aspect = 5)
plt.savefig(f'Resultados/Imagens/{geracao}-{img}.png')
plt.clf()
plt.close()
def plot_evolucao_temporal(melhores, piores, medias):
x = [i for i in range(0,len(melhores))]
y_melhor = []
y_pior = []
y_media = []
for i in range(len(melhores)):
y_melhor.append(f_alpine02(melhores[i]))
y_media.append(f_alpine02(medias[i]))
y_pior.append(f_alpine02(piores[i]))
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x,y_melhor,'b-.', label = "Melhor")
plt.plot(x,y_media, 'g-.', label = "Media")
plt.plot(x,y_pior, 'r-.', label = "Pior")
plt.title("Evolucao temporal do algoritmo")
ax.set_xlabel('Geracao')
ax.set_ylabel('f(x1,x2)')
plt.legend()
plt.show()
plt.clf()
plt.close()
def plot_variaveis_populacao(melhores, piores, medias):
x1_melhores = []
x2_melhores = []
x1_medias = []
x2_medias = []
x1_piores = []
x2_piores = []
for i in range(len(melhores)):
x1_melhores.append(melhores[i]['X1'])
x2_melhores.append(melhores[i]['X2'])
x1_medias.append(medias[i]['X1'])
x2_medias.append(medias[i]['X2'])
x1_piores.append(piores[i]['X1'])
x2_piores.append(piores[i]['X2'])
fig = plt.figure(figsize= (16,9))
ax = plt.axes()
plt.plot(x1_melhores,x2_melhores,'b-.', label = "Melhor")
plt.plot(x1_medias,x2_medias, 'g-.', label = "Media")
plt.plot(x1_piores,x2_piores, 'r-.', label = "Pior")
plt.title("Evolucao dos atributos da populacao")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
plt.legend()
plt.show()
plt.clf()
plt.close()
def populacao_media(populacao):
return populacao.mean(axis = 0)
def genetic_algorithm(tamanho_pop, tx_mutacao, tx_crossover, total_geracoes, image_path, image):
start = timer()
melhor_geracao = 0
# Create the dataframe and fill it with random values
populacao = pd.DataFrame()
populacao['treshold'] = np.random.randint(0, 255, size=(tamanho_pop))
populacao['contrast'] = np.random.randint(0, 100, size=(tamanho_pop))
#print(populacao)
# best individual
best = []
pior = []
melhor = []
media = []
for geracao in tqdm(range(total_geracoes)) :
populacao['fx'] = funcao_objetivo(populacao,image_path)
# sort
populacao_ordenada = populacao.sort_values('fx', axis= 0)
# Take the best individual of the current generation
if geracao == 0:
best = populacao_ordenada.loc[0]
elif best['fx'] < populacao_ordenada.loc[0]['fx']:
best = populacao_ordenada.loc[0]
melhor_geracao = geracao
melhor.append(populacao_ordenada.loc[0])
pior.append(populacao_ordenada.loc[tamanho_pop-1])
media.append(populacao_media(populacao_ordenada.copy()))
Fx = aptidao_proporcional(populacao)
nova_populacao = pd.DataFrame(columns = ['treshold', 'contrast'])
for i in range(0, int(tamanho_pop/2)):
# call selection
P1 = selecao_roleta(Fx, populacao)
# so that individuals are not repeated
while(True):
P2 = selecao_roleta(Fx, populacao)
if( P2['treshold'] != P1['treshold'] or P2['contrast'] != P1['contrast']):
break
# convert P1 and P2 to arrays
P1 = P1.to
|
individuo_mutado = []
# since the mutation rate is between 0 and 1:
rand_treshrold = np.random.random()
if rand_treshrold < tx_mutacao:
individuo_mutado.append(np.random.randint(0, 255))
else:
individuo_mutado.append(individuo[0])
rand_contrast = np.random.random()
if rand_contrast < tx_mutacao:
individuo_mutado.append(np.random.randint(0, 130))
else:
individuo_mutado.append(individuo[1])
return individuo_mutado
|
identifier_body
|
SCRIPTER.js
|
) {
if (p[x].treasure) {
pts.push(p[x]);
}
}
// If there is more than one treasure here, move them all
if (pts.length > 1) {
for (var y = 0; y < pts.length; ++y) {
pts[y].location = "room_53";
}
println("SUDDENLY, A MUMMY CREEPS UP BEHIND YOU!! \"I'M THE KEEPER OF THE TOMB\", HE WAILS, \"I TAKE THESE TREASURES AND PUT THEM IN THE CHEST DEEP IN THE MAZE!\" HE GRABS YOUR TREASURE AND VANISHES INTO THE GLOOM.");
chest.location = "room_53";
}
}
return true;
},
AssertObjectXIsInPack : function(obj) {
debugMes("AssertObjectXIsInPack("+obj+")");
var o = OBJS.getTopObjectByName(obj);
if(o.location === 'pack')
|
return false;
},
AssertObjectXIsInCurrentRoom : function(obj) {
debugMes("AssertObjectXIsInCurrentRoom("+obj+")");
var o = OBJS.getTopObjectByName(obj);
if(o.location === GAME.currentRoom) {
return true;
}
return false;
},
AssertObjectXIsInCurrentRoomOrPack : function(obj) {
debugMes("AssertObjectXIsInRoomOrPack("+obj+")");
var o = OBJS.getTopObjectByName(obj);
if(o.location === 'pack' || o.location === GAME.currentRoom) {
return true;
}
return false;
},
PrintMessageX : function(message) {
debugMes("PrintMessageX()");
println(message);
return true;
},
PrintRoomDescription : function() {
debugMes("PrintRoomDescription()");
look();
return true;
},
MoveObjectXToRoomY : function(x,y) {
debugMes("MoveObjectXToRoomY("+x+","+y+")");
var objX = OBJS.getExactObjectByName(x);
objX.location = y;
return true;
},
Quit : function () {
debugMes("Quit()");
SCRIPTCOMMANDS.PrintScore();
GAME.gameOver = true;
return;
},
PlayerDied : function () {
debugMes("PlayerDied()");
SCRIPTCOMMANDS.PrintScore();
GAME.gameOver = true;
return;
},
MoveObjectXToIntoContainerY : function (x,y) {
debugMes("MoveObjectXToRoomY("+x+","+y+")");
var objX = OBJS.getExactObjectByName(x);
objX.location = y;
return true;
},
MoveObjectXToCurrentRoom : function (x) {
debugMes("MoveObjectXToCurrentRoom("+x+")");
var objX = OBJS.getExactObjectByName(x);
objX.location = GAME.currentRoom;
println("OK");
return true;
},
PrintScore : function () {
debugMes("PrintScore()");
var score = 0;
var obs = OBJS.getAllObjects();
for(var x=0;x<obs.length;++x) {
var ob = obs[x];
if (ob.treasure) {
// Could be in chest. Where is the chest?
ob = OBJS.getTopObjectByName(ob.name);
if (ob.location === "room_2") {
score = score + 20;
} else if (ob.location === "pack") {
score = score + 5;
}
}
}
println("YOU HAVE SCORED " + score + " OUT OF 220.");
println("USING " + GAME.turnCount + " TURNS.");
return true;
},
AssertObjectXMatchesUserInput : function(obj) {
debugMes("AssertObjectXMatchesUserInput("+obj+") ["+inputNoun.name+"]");
if(obj === inputNoun.name) {
return true;
}
return false;
},
GetUserInputObject : function() {
debugMes("GetUserInputObject() ["+inputNoun.name+"]");
if(!inputNoun.packable) {
println("DON'T BE REDICULOUS!")
return true;
}
if(OBJS.getTopObjectByName(inputNoun.name).location === "pack") {
println("YOU ARE ALREADY CARRYING IT.");
return true;
}
var objs = OBJS.getObjectsAtLocation("pack");
if (objs.length > 7) {
println("YOU CAN'T CARRY ANYTHING MORE. YOU'LL HAVE TO DROP SOMETHING FIRST.");
return true;
}
OBJS.getTopObjectByName(inputNoun.name).location = "pack";
println("OK");
return true;
},
GetObjectXFromRoom : function (x) {
debugMes("GetObjectXFromRoom("+x+")");
var objs = OBJS.getObjectsAtLocation("pack");
if(!inputNoun.packable) {
println("DON'T BE REDICULOUS!")
return true;
}
if (objs.length > 7) {
println("YOU CAN'T CARRY ANYTHING MORE. YOU'LL HAVE TO DROP SOMETHING FIRST.");
return true;
}
var objX = OBJS.getExactObjectByName(x);
objX.location = "pack";
println("OK");
return true;
},
PrintInventory : function() {
debugMes("PrintInventory()");
var objs = OBJS.getObjectsAtLocation("pack");
if (objs.length===0) {
println("YOU'RE NOT CARRYING ANYTHING.");
} else {
for (var x=0;x<objs.length;++x) {
println(objs[x].short);
}
}
return true;
},
DropUserInputObject : function() {
debugMes("DropUserInputObject() ["+inputNoun.name+"]");
OBJS.getTopObjectByName(inputNoun.name).location = GAME.currentRoom;
println("OK");
return true;
},
DropObjectX : function (x) {
debugMes("DropObjectX("+x+")");
var objX = OBJS.getExactObjectByName(x);
objX.location = GAME.currentRoom;
println("OK");
return true;
},
PrintOK : function() {
debugMes("PrintOK()");
println("OK");
return true;
},
JumpToTopOfGameLoop : function() {
debugMes("JumpToTopOfGameLoop()");
return true;
},
AssertRandomIsGreaterThanX : function (num) {
debugMes("AssertRandomIsGreaterThanX("+num+")");
var val = getRandomByte();
if(num<=val) {
println("YOU HAVE CRAWLED AROUND IN SOME LITTLE HOLES AND WOUND UP BACK IN THE MAIN PASSAGE.");
SCRIPTCOMMANDS.look();
return true;
}
return false;
},
MoveToRoomXIfItWasLastRoom : function (room) {
// Never used in game
debugMes("MoveToRoomXIfItWasLastRoom("+room+")");
if(room === GAME.lastRoom) {
return SCRIPTCOMMANDS.MoveToRoomX(room);
} else {
return false;
}
},
MoveToLastRoom : function () {
debugMes("MoveToLastRoom()");
if(!GAME.lastRoom) {
println("SORRY, BUT I NO LONGER SEEM TO REMEMBER HOW IT WAS YOU GOT HERE.");
return true;
}
return SCRIPTCOMMANDS.MoveToRoomX(GAME.lastRoom);
},
AssertPackIsEmptyExceptForEmerald : function() {
debugMes("AssertPackIsEmptyExceptForEmerald()");
var objs = OBJS.getObjectsAtLocation("pack");
if(objs.length===0) return true;
if(objs.length>1) return false;
if(objs[0].name !== "#EMERALD") return false;
return true;
},
SaveGame : function () {
var state = "";
var objs = OBJS.getAllObjects();
for (var x=0;x<objs.length;++x) {
var lo = objs[x].location;
if(lo.indexOf("room_")==0) {
lo = lo.substring(4);
}
state = state + lo+",";
}
state = "LOAD "+state + GAME.currentRoom+","+GAME.lastRoom+","+GAME.turnCount+","+GAME.gameOver+","+GAME.batteryLife;
println("Please copy the following long line to a text file to save your game. Then paste in the line to reload.");
println("");
println(state);
println("");
return true;
},
LoadGame : function () {
var state = PARSE.loadState.split(",");
|
{
return true;
}
|
conditional_block
|
SCRIPTER.js
|
{
if (p[x].treasure) {
pts.push(p[x]);
}
}
// If there is more than one treasure here, move them all
if (pts.length > 1) {
for (var y = 0; y < pts.length; ++y) {
pts[y].location = "room_53";
}
println("SUDDENLY, A MUMMY CREEPS UP BEHIND YOU!! \"I'M THE KEEPER OF THE TOMB\", HE WAILS, \"I TAKE THESE TREASURES AND PUT THEM IN THE CHEST DEEP IN THE MAZE!\" HE GRABS YOUR TREASURE AND VANISHES INTO THE GLOOM.");
chest.location = "room_53";
}
}
return true;
},
AssertObjectXIsInPack : function(obj) {
debugMes("AssertObjectXIsInPack("+obj+")");
var o = OBJS.getTopObjectByName(obj);
if(o.location === 'pack') {
return true;
}
return false;
},
AssertObjectXIsInCurrentRoom : function(obj) {
debugMes("AssertObjectXIsInCurrentRoom("+obj+")");
var o = OBJS.getTopObjectByName(obj);
if(o.location === GAME.currentRoom) {
return true;
}
return false;
},
AssertObjectXIsInCurrentRoomOrPack : function(obj) {
debugMes("AssertObjectXIsInRoomOrPack("+obj+")");
var o = OBJS.getTopObjectByName(obj);
if(o.location === 'pack' || o.location === GAME.currentRoom) {
return true;
}
return false;
},
PrintMessageX : function(message) {
debugMes("PrintMessageX()");
println(message);
return true;
},
PrintRoomDescription : function() {
debugMes("PrintRoomDescription()");
look();
return true;
},
MoveObjectXToRoomY : function(x,y) {
debugMes("MoveObjectXToRoomY("+x+","+y+")");
var objX = OBJS.getExactObjectByName(x);
objX.location = y;
return true;
},
Quit : function () {
debugMes("Quit()");
SCRIPTCOMMANDS.PrintScore();
GAME.gameOver = true;
return;
},
PlayerDied : function () {
debugMes("PlayerDied()");
SCRIPTCOMMANDS.PrintScore();
GAME.gameOver = true;
return;
},
MoveObjectXToIntoContainerY : function (x,y) {
debugMes("MoveObjectXToRoomY("+x+","+y+")");
var objX = OBJS.getExactObjectByName(x);
objX.location = y;
return true;
},
MoveObjectXToCurrentRoom : function (x) {
|
var objX = OBJS.getExactObjectByName(x);
objX.location = GAME.currentRoom;
println("OK");
return true;
},
PrintScore : function () {
debugMes("PrintScore()");
var score = 0;
var obs = OBJS.getAllObjects();
for(var x=0;x<obs.length;++x) {
var ob = obs[x];
if (ob.treasure) {
// Could be in chest. Where is the chest?
ob = OBJS.getTopObjectByName(ob.name);
if (ob.location === "room_2") {
score = score + 20;
} else if (ob.location === "pack") {
score = score + 5;
}
}
}
println("YOU HAVE SCORED " + score + " OUT OF 220.");
println("USING " + GAME.turnCount + " TURNS.");
return true;
},
AssertObjectXMatchesUserInput : function(obj) {
debugMes("AssertObjectXMatchesUserInput("+obj+") ["+inputNoun.name+"]");
if(obj === inputNoun.name) {
return true;
}
return false;
},
GetUserInputObject : function() {
debugMes("GetUserInputObject() ["+inputNoun.name+"]");
if(!inputNoun.packable) {
println("DON'T BE REDICULOUS!")
return true;
}
if(OBJS.getTopObjectByName(inputNoun.name).location === "pack") {
println("YOU ARE ALREADY CARRYING IT.");
return true;
}
var objs = OBJS.getObjectsAtLocation("pack");
if (objs.length > 7) {
println("YOU CAN'T CARRY ANYTHING MORE. YOU'LL HAVE TO DROP SOMETHING FIRST.");
return true;
}
OBJS.getTopObjectByName(inputNoun.name).location = "pack";
println("OK");
return true;
},
GetObjectXFromRoom : function (x) {
debugMes("GetObjectXFromRoom("+x+")");
var objs = OBJS.getObjectsAtLocation("pack");
if(!inputNoun.packable) {
println("DON'T BE REDICULOUS!")
return true;
}
if (objs.length > 7) {
println("YOU CAN'T CARRY ANYTHING MORE. YOU'LL HAVE TO DROP SOMETHING FIRST.");
return true;
}
var objX = OBJS.getExactObjectByName(x);
objX.location = "pack";
println("OK");
return true;
},
PrintInventory : function() {
debugMes("PrintInventory()");
var objs = OBJS.getObjectsAtLocation("pack");
if (objs.length===0) {
println("YOU'RE NOT CARRYING ANYTHING.");
} else {
for (var x=0;x<objs.length;++x) {
println(objs[x].short);
}
}
return true;
},
DropUserInputObject : function() {
debugMes("DropUserInputObject() ["+inputNoun.name+"]");
OBJS.getTopObjectByName(inputNoun.name).location = GAME.currentRoom;
println("OK");
return true;
},
DropObjectX : function (x) {
debugMes("DropObjectX("+x+")");
var objX = OBJS.getExactObjectByName(x);
objX.location = GAME.currentRoom;
println("OK");
return true;
},
PrintOK : function() {
debugMes("PrintOK()");
println("OK");
return true;
},
JumpToTopOfGameLoop : function() {
debugMes("JumpToTopOfGameLoop()");
return true;
},
AssertRandomIsGreaterThanX : function (num) {
debugMes("AssertRandomIsGreaterThanX("+num+")");
var val = getRandomByte();
if(num<=val) {
println("YOU HAVE CRAWLED AROUND IN SOME LITTLE HOLES AND WOUND UP BACK IN THE MAIN PASSAGE.");
SCRIPTCOMMANDS.look();
return true;
}
return false;
},
MoveToRoomXIfItWasLastRoom : function (room) {
// Never used in game
debugMes("MoveToRoomXIfItWasLastRoom("+room+")");
if(room === GAME.lastRoom) {
return SCRIPTCOMMANDS.MoveToRoomX(room);
} else {
return false;
}
},
MoveToLastRoom : function () {
debugMes("MoveToLastRoom()");
if(!GAME.lastRoom) {
println("SORRY, BUT I NO LONGER SEEM TO REMEMBER HOW IT WAS YOU GOT HERE.");
return true;
}
return SCRIPTCOMMANDS.MoveToRoomX(GAME.lastRoom);
},
AssertPackIsEmptyExceptForEmerald : function() {
debugMes("AssertPackIsEmptyExceptForEmerald()");
var objs = OBJS.getObjectsAtLocation("pack");
if(objs.length===0) return true;
if(objs.length>1) return false;
if(objs[0].name !== "#EMERALD") return false;
return true;
},
SaveGame : function () {
var state = "";
var objs = OBJS.getAllObjects();
for (var x=0;x<objs.length;++x) {
var lo = objs[x].location;
if(lo.indexOf("room_")==0) {
lo = lo.substring(4);
}
state = state + lo+",";
}
state = "LOAD "+state + GAME.currentRoom+","+GAME.lastRoom+","+GAME.turnCount+","+GAME.gameOver+","+GAME.batteryLife;
println("Please copy the following long line to a text file to save your game. Then paste in the line to reload.");
println("");
println(state);
println("");
return true;
},
LoadGame : function () {
var state = PARSE.loadState.split(",");
|
debugMes("MoveObjectXToCurrentRoom("+x+")");
|
random_line_split
|
my_finance_module.py
|
)})
Close = re.search('s\) " data-reactid="15">(.*?)<', inform)
if Close:
tmp = Close.group(1)
tmp = tmp.replace(",", "")
l.update({"Prev Close":float(tmp)})
Open = re.search('s\) " data-reactid="20">(.*?)<', inform)
if Open:
tmp = Open.group(1)
tmp = tmp.replace(",", "")
l.update({"Open":float(tmp)})
Bid = re.search('s\) " data-reactid="25">(.*?)<',inform)
if Bid:
tmp=Bid.group(1)
tmp = tmp.replace(",", "")
l.update({"Bid":tmp})
Ask = re.search('s\) " data-reactid="30">(.*?)<', inform)
if Ask:
tmp = Ask.group(1)
tmp = tmp.replace(",", "")
l.update({"Ask":tmp})
Volume = re.search('s\) " data-reactid="48">(.*?)<', inform)
if Volume:
tmp = Volume.group(1)
tmp = tmp.replace(",","")
l.update({"Volume":int(tmp)})
else:
l.update({"err":'error'})
return l
return l
####################################################
def add_time(mass):
os.environ['TZ'] = 'America/New_York'
time.
|
t=time.localtime()
mass.update({"time":str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+" "+str(t.tm_hour)+":"+str(t.tm_min)+":"+str(t.tm_sec)})
return mass
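# Editor's note: add_time() stamps a record with the current New York local
# time by setting TZ and calling time.tzset(), which is POSIX-only. A sketch of
# the same idea with the standard-library zoneinfo module (Python 3.9+), shown
# as an alternative rather than as this module's actual implementation:
# from datetime import datetime
# from zoneinfo import ZoneInfo
# now_ny = datetime.now(ZoneInfo("America/New_York"))
# stamp = now_ny.strftime("%d-%m-%Y %H:%M:%S")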
####################################################
def preprocess_mass(strings,t,init_point):
Mass_DF={}
for str1 in strings:
# Mass_DF[str1]=pd.read_csv('data/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/' + str1 + '.csv')
Mass_DF[str1] = pd.read_csv('data/full_data/'+str1+'.csv')
Mass_DF[str1] = Mass_DF[str1].iloc[-init_point:]
del Mass_DF[str1]['time.1']
Mass_DF[str1].index = Mass_DF[str1]['time']
del Mass_DF[str1]['time'] # drop the date/time column created in the csv file
Mass_DF[str1].index =Mass_DF[str1].index.to_datetime()
Mass_DF[str1]["hour"] = Mass_DF[str1].index.hour
Mass_DF[str1]["minute"] = Mass_DF[str1].index.minute
Mass_DF[str1]["sec"] = Mass_DF[str1].index.second
return Mass_DF
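# Editor's note: preprocess_mass() loads each ticker's CSV, keeps the last
# init_point rows, moves the 'time' column into the index and derives
# hour/minute/sec feature columns. Index.to_datetime() is an older pandas API;
# a sketch of the same indexing step in current pandas (the ticker file name is
# a placeholder):
# import pandas as pd
# df = pd.read_csv('data/full_data/AAPL.csv')
# df.index = pd.to_datetime(df['time'])
# df['hour'], df['minute'], df['sec'] = df.index.hour, df.index.minute, df.index.second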
#######################################################
MKdir_gr = lambda t: os.system('mkdir -p graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year) )
#######################################################
def drowing_picture(str1,Y,y_pred,y_pred1,X,test_col,train_point_print,col_p,point_pred,t,PC,rolling_mean,check_drow):
# build the time labels for plotting
time_interval = pd.DataFrame({"hour": X[-col_p:, 0], "minute": X[-col_p:, 1], "sec": X[-col_p:, 2]})
time_interval['date'] = time_interval['hour'].astype('str') + ":" + time_interval['minute'].astype('str') + ":" + \
time_interval['sec'].astype('str')
fmt = dates.DateFormatter("%H:%M:%S")
time_interval1 = [dt.datetime.strptime(i, "%H:%M:%S") for i in time_interval['date']]
# evaluate the prediction quality
accuracy = my_mean_sqeared_error(Y[-(test_col):] , y_pred1[-(test_col):] )
acc_str = "mean squared error: %.4f%%" % accuracy
# print(acc_str)
ma = my_average(Y[-(test_col):] , y_pred1[-(test_col):] )
ma_str = "average error: %.3f%%" % ma
# print(ma_str)
mde,tuk = max_delta(Y[-(test_col):], y_pred1[-(test_col):] )
mde_str = "max delta error: %.3f%%" % mde
# draw the plot
if check_drow:
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
text1 = acc_str + '\n' + ma_str +'\n' +mde_str
ax.text(0.02, 0.90, text1, bbox=dict(facecolor='white', alpha=0.7), transform=ax.transAxes, fontsize=12)
ax.plot(time_interval1, y_pred, 'r-', label="predict", linewidth=2)
ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo--', label="averaged sample", linewidth=1)
# ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo', label="test", linewidth=2)
ax.plot(time_interval1[:-point_pred],rolling_mean[-(test_col + train_point_print):])
plt.axvline(x=time_interval1[train_point_print - 1], color='k', linestyle='--', label='bound_train', linewidth=2)
plt.axvline(x=time_interval1[test_col + train_point_print - 1], color='g', linestyle='--', label='bound_test',
linewidth=2)
# plt.axhline(y=0, color='m', linestyle='--', label='zero', linewidth=2)
def price(x):
return "%"+"%.5f" % x
ax.set_ylabel('procent of diff price')
ax.set_xlabel('time (h:m:s)')
ax.format_ydata = price
ax.xaxis.set_major_formatter(fmt)
majorFormatter = FormatStrFormatter('%.3f%%')
ax.yaxis.set_major_formatter(majorFormatter)
minorLocator = AutoMinorLocator(n=2)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_formatter(fmt)
ax.set_title('стоимость акций ' + str1)
for label in ax.xaxis.get_ticklabels(minor=True):
label.set_rotation(30)
label.set_fontsize(10)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(30)
label.set_fontsize(10)
ax.legend(loc='upper right')
# draw the grid
ax.grid(True, which='major', color='grey', linestyle='dashed')
ax.grid(True, which='minor', color='grey', linestyle='dashed')
# fig.autofmt_xdate()
# plt.show()
MKdir_gr(t)
fig.savefig('graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/цена акции компании '+str1+ '.pdf',format = 'pdf',dpi=1000)
fig.clf()
# all these magic numbers should be exposed as control parameters
last_price = my_single_average(y_pred[-4:]) #y_pred[-3:-2] #
av_Y = my_single_average(Y[-(test_col):])
# compute the Gaussian probability
if abs(ma) > abs(av_Y):
P = Gauss_probability(0.1,abs(ma),accuracy,mde)
else:
P = Gauss_probability(abs(1-abs(ma/av_Y)),abs(ma),accuracy,mde)
# print the results to the screen
print(str1 +": procent %.3f%% of price in %d:%d:%d, probability: %.3f%% " % (last_price,time_interval[-3:-2]['hour'],time_interval[-3:-2]['minute'],time_interval[-3:-2]['sec'], P * 100) )
#######################################################
def boosting_solver(Mass_df,str1,delta_t,t,param_points,param_model,n,m,check_drow,cross_working):
# Mass_df[str1]['diff_price'] = Mass_df[str1]['Main Price'].diff() # price change
Mass_df[str1]['procent_diff'] = ( Mass_df[str1]['Main Price'] - Mass_df[str1]['Prev Close'] ) / Mass_df[str1]['Prev Close'] *100
PC = Mass_df[str1]['Prev Close'][1]
# print(Mass_df[str1].columns)
X = Mass_df[str1][['hour','minute','sec']][n-1:].values # convert the DataFrame to a NumPy array
# moving average
rolling_mean = Mass_df[str1]['procent_diff'].rolling(window=n).mean()
Y = rolling_mean[n-1:].values
test_col= param_points['test_col']
point_pred = param_points['point_pred']
train_point_print = param_points['train_point_print']
col_p = point_pred + test_col + train_point_print
# split X and Y into training and test sets
X_train = X[:-test_col]
y_train = Y[:-test_col]
model = xgboost.XGBRegressor
|
tzset()
|
identifier_name
|
my_finance_module.py
|
})
Close = re.search('s\) " data-reactid="15">(.*?)<', inform)
if Close:
tmp = Close.group(1)
tmp = tmp.replace(",", "")
l.update({"Prev Close":float(tmp)})
Open = re.search('s\) " data-reactid="20">(.*?)<', inform)
if Open:
tmp = Open.group(1)
tmp = tmp.replace(",", "")
l.update({"Open":float(tmp)})
Bid = re.search('s\) " data-reactid="25">(.*?)<',inform)
if Bid:
tmp=Bid.group(1)
tmp = tmp.replace(",", "")
l.update({"Bid":tmp})
Ask = re.search('s\) " data-reactid="30">(.*?)<', inform)
if Ask:
tmp = Ask.group(1)
tmp = tmp.replace(",", "")
l.update({"Ask":tmp})
Volume = re.search('s\) " data-reactid="48">(.*?)<', inform)
if Volume:
tmp = Volume.group(1)
tmp = tmp.replace(",","")
l.update({"Volume":int(tmp)})
else:
l.update({"err":'error'})
return l
return l
####################################################
def add_time(mass):
os.environ['TZ'] = 'America/New_York'
time.tzset()
t=time.localtime()
mass.update({"time":str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+" "+str(t.tm_hour)+":"+str(t.tm_min)+":"+str(t.tm_sec)})
return mass
####################################################
def preprocess_mass(strings,t,init_point):
Mass_DF={}
for str1 in strings:
# Mass_DF[str1]=pd.read_csv('data/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/' + str1 + '.csv')
Mass_DF[str1] = pd.read_csv('data/full_data/'+str1+'.csv')
Mass_DF[str1] = Mass_DF[str1].iloc[-init_point:]
del Mass_DF[str1]['time.1']
Mass_DF[str1].index = Mass_DF[str1]['time']
del Mass_DF[str1]['time'] # drop the date/time column created in the csv file
Mass_DF[str1].index =Mass_DF[str1].index.to_datetime()
Mass_DF[str1]["hour"] = Mass_DF[str1].index.hour
Mass_DF[str1]["minute"] = Mass_DF[str1].index.minute
Mass_DF[str1]["sec"] = Mass_DF[str1].index.second
return Mass_DF
#######################################################
MKdir_gr = lambda t: os.system('mkdir -p graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year) )
#######################################################
def drowing_picture(str1,Y,y_pred,y_pred1,X,test_col,train_point_print,col_p,point_pred,t,PC,rolling_mean,check_drow):
# build the time labels for plotting
time_interval = pd.DataFrame({"hour": X[-col_p:, 0], "minute": X[-col_p:, 1], "sec": X[-col_p:, 2]})
time_interval['date
|
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
text1 = acc_str + '\n' + ma_str +'\n' +mde_str
ax.text(0.02, 0.90, text1, bbox=dict(facecolor='white', alpha=0.7), transform=ax.transAxes, fontsize=12)
ax.plot(time_interval1, y_pred, 'r-', label="predict", linewidth=2)
ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo--', label="averaged sample", linewidth=1)
# ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo', label="test", linewidth=2)
ax.plot(time_interval1[:-point_pred],rolling_mean[-(test_col + train_point_print):])
plt.axvline(x=time_interval1[train_point_print - 1], color='k', linestyle='--', label='bound_train', linewidth=2)
plt.axvline(x=time_interval1[test_col + train_point_print - 1], color='g', linestyle='--', label='bound_test',
linewidth=2)
# plt.axhline(y=0, color='m', linestyle='--', label='zero', linewidth=2)
def price(x):
return "%"+"%.5f" % x
ax.set_ylabel('procent of diff price')
ax.set_xlabel('time (h:m:s)')
ax.format_ydata = price
ax.xaxis.set_major_formatter(fmt)
majorFormatter = FormatStrFormatter('%.3f%%')
ax.yaxis.set_major_formatter(majorFormatter)
minorLocator = AutoMinorLocator(n=2)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_formatter(fmt)
ax.set_title('стоимость акций ' + str1)
for label in ax.xaxis.get_ticklabels(minor=True):
label.set_rotation(30)
label.set_fontsize(10)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(30)
label.set_fontsize(10)
ax.legend(loc='upper right')
# draw the grid
ax.grid(True, which='major', color='grey', linestyle='dashed')
ax.grid(True, which='minor', color='grey', linestyle='dashed')
# fig.autofmt_xdate()
# plt.show()
MKdir_gr(t)
fig.savefig('graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/цена акции компании '+str1+ '.pdf',format = 'pdf',dpi=1000)
fig.clf()
# all these magic numbers should be exposed as control parameters
last_price = my_single_average(y_pred[-4:]) #y_pred[-3:-2] #
av_Y = my_single_average(Y[-(test_col):])
# compute the Gaussian probability
if abs(ma) > abs(av_Y):
P = Gauss_probability(0.1,abs(ma),accuracy,mde)
else:
P = Gauss_probability(abs(1-abs(ma/av_Y)),abs(ma),accuracy,mde)
# print the results to the screen
print(str1 +": procent %.3f%% of price in %d:%d:%d, probability: %.3f%% " % (last_price,time_interval[-3:-2]['hour'],time_interval[-3:-2]['minute'],time_interval[-3:-2]['sec'], P * 100) )
#######################################################
def boosting_solver(Mass_df,str1,delta_t,t,param_points,param_model,n,m,check_drow,cross_working):
# Mass_df[str1]['diff_price'] = Mass_df[str1]['Main Price'].diff() # price change
Mass_df[str1]['procent_diff'] = ( Mass_df[str1]['Main Price'] - Mass_df[str1]['Prev Close'] ) / Mass_df[str1]['Prev Close'] *100
PC = Mass_df[str1]['Prev Close'][1]
# print(Mass_df[str1].columns)
X = Mass_df[str1][['hour','minute','sec']][n-1:].values # convert the DataFrame to a NumPy array
# moving average
rolling_mean = Mass_df[str1]['procent_diff'].rolling(window=n).mean()
Y = rolling_mean[n-1:].values
test_col= param_points['test_col']
point_pred = param_points['point_pred']
train_point_print = param_points['train_point_print']
col_p = point_pred + test_col + train_point_print
# split X and Y into training and test sets
X_train = X[:-test_col]
y_train = Y[:-test_col]
model = xgboost.XGB
|
'] = time_interval['hour'].astype('str') + ":" + time_interval['minute'].astype('str') + ":" + \
time_interval['sec'].astype('str')
fmt = dates.DateFormatter("%H:%M:%S")
time_interval1 = [dt.datetime.strptime(i, "%H:%M:%S") for i in time_interval['date']]
# evaluate the prediction quality
accuracy = my_mean_sqeared_error(Y[-(test_col):] , y_pred1[-(test_col):] )
acc_str = "mean squared error: %.4f%%" % accuracy
# print(acc_str)
ma = my_average(Y[-(test_col):] , y_pred1[-(test_col):] )
ma_str = "average error: %.3f%%" % ma
# print(ma_str)
mde,tuk = max_delta(Y[-(test_col):], y_pred1[-(test_col):] )
mde_str = "max delta error: %.3f%%" % mde
# draw the plot
if check_drow:
|
identifier_body
|
my_finance_module.py
|
)})
Close = re.search('s\) " data-reactid="15">(.*?)<', inform)
if Close:
tmp = Close.group(1)
tmp = tmp.replace(",", "")
l.update({"Prev Close":float(tmp)})
Open = re.search('s\) " data-reactid="20">(.*?)<', inform)
if Open:
tmp = Open.group(1)
tmp = tmp.replace(",", "")
l.update({"Open":float(tmp)})
Bid = re.search('s\) " data-reactid="25">(.*?)<',inform)
if Bid:
tmp=Bid.group(1)
tmp = tmp.replace(",", "")
l.update({"Bid":tmp})
Ask = re.search('s\) " data-reactid="30">(.*?)<', inform)
if Ask:
tmp = Ask.group(1)
tmp = tmp.replace(",", "")
l.update({"Ask":tmp})
Volume = re.search('s\) " data-reactid="48">(.*?)<', inform)
if Volume:
tmp = Volume.group(1)
tmp = tmp.replace(",","")
l.update({"Volume":int(tmp)})
else:
l.update({"err":'error'})
return l
return l
####################################################
def add_time(mass):
os.environ['TZ'] = 'America/New_York'
time.tzset()
t=time.localtime()
mass.update({"time":str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+" "+str(t.tm_hour)+":"+str(t.tm_min)+":"+str(t.tm_sec)})
return mass
####################################################
def preprocess_mass(strings,t,init_point):
Mass_DF={}
for str1 in strings:
# Mass_DF[str1]=pd.read_csv('data/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/' + str1 + '.csv')
Mass_DF[str1] = pd.read_csv('data/full_data/'+str1+'.csv')
Mass_DF[str1] = Mass_DF[str1].iloc[-init_point:]
del Mass_DF[str1]['time.1']
Mass_DF[str1].index = Mass_DF[str1]['time']
del Mass_DF[str1]['time'] # drop the date/time column created in the csv file
Mass_DF[str1].index =Mass_DF[str1].index.to_datetime()
Mass_DF[str1]["hour"] = Mass_DF[str1].index.hour
Mass_DF[str1]["minute"] = Mass_DF[str1].index.minute
Mass_DF[str1]["sec"] = Mass_DF[str1].index.second
return Mass_DF
#######################################################
MKdir_gr = lambda t: os.system('mkdir -p graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year) )
#######################################################
def drowing_picture(str1,Y,y_pred,y_pred1,X,test_col,train_point_print,col_p,point_pred,t,PC,rolling_mean,check_drow):
# build the time labels for plotting
time_interval = pd.DataFrame({"hour": X[-col_p:, 0], "minute": X[-col_p:, 1], "sec": X[-col_p:, 2]})
|
time_interval['sec'].astype('str')
fmt = dates.DateFormatter("%H:%M:%S")
time_interval1 = [dt.datetime.strptime(i, "%H:%M:%S") for i in time_interval['date']]
# evaluate the prediction quality
accuracy = my_mean_sqeared_error(Y[-(test_col):] , y_pred1[-(test_col):] )
acc_str = "mean squared error: %.4f%%" % accuracy
# print(acc_str)
ma = my_average(Y[-(test_col):] , y_pred1[-(test_col):] )
ma_str = "average error: %.3f%%" % ma
# print(ma_str)
mde,tuk = max_delta(Y[-(test_col):], y_pred1[-(test_col):] )
mde_str = "max delta error: %.3f%%" % mde
# draw the plot
if check_drow:
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
text1 = acc_str + '\n' + ma_str +'\n' +mde_str
ax.text(0.02, 0.90, text1, bbox=dict(facecolor='white', alpha=0.7), transform=ax.transAxes, fontsize=12)
ax.plot(time_interval1, y_pred, 'r-', label="predict", linewidth=2)
ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo--', label="averaged sample", linewidth=1)
# ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo', label="test", linewidth=2)
ax.plot(time_interval1[:-point_pred],rolling_mean[-(test_col + train_point_print):])
plt.axvline(x=time_interval1[train_point_print - 1], color='k', linestyle='--', label='bound_train', linewidth=2)
plt.axvline(x=time_interval1[test_col + train_point_print - 1], color='g', linestyle='--', label='bound_test',
linewidth=2)
# plt.axhline(y=0, color='m', linestyle='--', label='zero', linewidth=2)
def price(x):
return "%"+"%.5f" % x
ax.set_ylabel('procent of diff price')
ax.set_xlabel('time (h:m:s)')
ax.format_ydata = price
ax.xaxis.set_major_formatter(fmt)
majorFormatter = FormatStrFormatter('%.3f%%')
ax.yaxis.set_major_formatter(majorFormatter)
minorLocator = AutoMinorLocator(n=2)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_formatter(fmt)
ax.set_title('стоимость акций ' + str1)
for label in ax.xaxis.get_ticklabels(minor=True):
label.set_rotation(30)
label.set_fontsize(10)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(30)
label.set_fontsize(10)
ax.legend(loc='upper right')
# draw the grid
ax.grid(True, which='major', color='grey', linestyle='dashed')
ax.grid(True, which='minor', color='grey', linestyle='dashed')
# fig.autofmt_xdate()
# plt.show()
MKdir_gr(t)
fig.savefig('graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/цена акции компании '+str1+ '.pdf',format = 'pdf',dpi=1000)
fig.clf()
# all these magic numbers should be exposed as control parameters
last_price = my_single_average(y_pred[-4:]) #y_pred[-3:-2] #
av_Y = my_single_average(Y[-(test_col):])
# compute the Gaussian probability
if abs(ma) > abs(av_Y):
P = Gauss_probability(0.1,abs(ma),accuracy,mde)
else:
P = Gauss_probability(abs(1-abs(ma/av_Y)),abs(ma),accuracy,mde)
# print the results to the screen
print(str1 +": procent %.3f%% of price in %d:%d:%d, probability: %.3f%% " % (last_price,time_interval[-3:-2]['hour'],time_interval[-3:-2]['minute'],time_interval[-3:-2]['sec'], P * 100) )
#######################################################
def boosting_solver(Mass_df,str1,delta_t,t,param_points,param_model,n,m,check_drow,cross_working):
# Mass_df[str1]['diff_price'] = Mass_df[str1]['Main Price'].diff() # price change
Mass_df[str1]['procent_diff'] = ( Mass_df[str1]['Main Price'] - Mass_df[str1]['Prev Close'] ) / Mass_df[str1]['Prev Close'] *100
PC = Mass_df[str1]['Prev Close'][1]
# print(Mass_df[str1].columns)
X = Mass_df[str1][['hour','minute','sec']][n-1:].values # convert the DataFrame to a NumPy array
# moving average
rolling_mean = Mass_df[str1]['procent_diff'].rolling(window=n).mean()
Y = rolling_mean[n-1:].values
test_col= param_points['test_col']
point_pred = param_points['point_pred']
train_point_print = param_points['train_point_print']
col_p = point_pred + test_col + train_point_print
# split X and Y into training and test sets
X_train = X[:-test_col]
y_train = Y[:-test_col]
model = xgboost.XGBRegressor
|
time_interval['date'] = time_interval['hour'].astype('str') + ":" + time_interval['minute'].astype('str') + ":" + \
|
random_line_split
|
my_finance_module.py
|
})
Close = re.search('s\) " data-reactid="15">(.*?)<', inform)
if Close:
tmp = Close.group(1)
tmp = tmp.replace(",", "")
l.update({"Prev Close":float(tmp)})
Open = re.search('s\) " data-reactid="20">(.*?)<', inform)
if Open:
tmp = Open.group(1)
tmp = tmp.replace(",", "")
l.update({"Open":float(tmp)})
Bid = re.search('s\) " data-reactid="25">(.*?)<',inform)
if Bid:
tmp=Bid.group(1)
tmp = tmp.replace(",", "")
l.update({"Bid":tmp})
Ask = re.search('s\) " data-reactid="30">(.*?)<', inform)
if Ask:
tmp = Ask.group(1)
tmp = tmp.replace(",", "")
l.update({"Ask":tmp})
Volume = re.search('s\) " data-reactid="48">(.*?)<', inform)
if Volume:
tmp = Volume.group(1)
tmp = tmp.replace(",","")
l.update({"Volume":int(tmp)})
else:
l.update({"err":'error'})
return l
return l
####################################################
def add_time(mass):
os.environ['TZ'] = 'America/New_York'
time.tzset()
t=time.localtime()
mass.update({"time":str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+" "+str(t.tm_hour)+":"+str(t.tm_min)+":"+str(t.tm_sec)})
return mass
####################################################
def preprocess_mass(strings,t,init_point):
Mass_DF={}
for str1 in strings:
# Mass_DF[str1]=pd.read_csv('data/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/' + str1 + '.csv')
Mass_DF[str1] = pd.read_csv('data/full_data/'+str1+'.csv')
Mass_DF[str1] = Mass_DF[str1].iloc[-init_point:]
del Mass_DF[str1]['time.1']
Mass_DF[str1].index = Mass_DF[str1]['time']
del Mass_DF[str1]['time'] # drop the date/time column created in the csv file
Mass_DF[str1].index =Mass_DF[str1].index.to_datetime()
Mass_DF[str1]["hour"] = Mass_DF[str1].index.hour
Mass_DF[str1]["minute"] = Mass_DF[str1].index.minute
Mass_DF[str1]["sec"] = Mass_DF[str1].index.second
return Mass_DF
#######################################################
MKdir_gr = lambda t: os.system('mkdir -p graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year) )
#######################################################
def drowing_picture(str1,Y,y_pred,y_pred1,X,test_col,train_point_print,col_p,point_pred,t,PC,rolling_mean,check_drow):
# build the time labels for plotting
time_interval = pd.DataFrame({"hour": X[-col_p:, 0], "minute": X[-col_p:, 1], "sec": X[-col_p:, 2]})
time_interval['date'] = time_interval['hour'].astype('str') + ":" + time_interval['minute'].astype('str') + ":" + \
time_interval['sec'].astype('str')
fmt = dates.DateFormatter("%H:%M:%S")
time_interval1 = [dt.datetime.strptime(i, "%H:%M:%S") for i in time_interval['date']]
# evaluate the prediction quality
accuracy = my_mean_sqeared_error(Y[-(test_col):] , y_pred1[-(test_col):] )
acc_str = "mean squared error: %.4f%%" % accuracy
# print(acc_str)
ma = my_average(Y[-(test_col):] , y_pred1[-(test_col):] )
ma_str = "average error: %.3f%%" % ma
# print(ma_str)
mde,tuk = max_delta(Y[-(test_col):], y_pred1[-(test_col):] )
mde_str = "max delta error: %.3f%%" % mde
# draw the plot
if check_drow:
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111)
text1 = acc_str + '\n' + ma_str +'\n' +mde_str
ax.text(0.02, 0.90, text1, bbox=dic
|
ax.xaxis.set_major_formatter(fmt)
majorFormatter = FormatStrFormatter('%.3f%%')
ax.yaxis.set_major_formatter(majorFormatter)
minorLocator = AutoMinorLocator(n=2)
ax.xaxis.set_minor_locator(minorLocator)
ax.xaxis.set_minor_formatter(fmt)
ax.set_title('стоимость акций ' + str1)
for label in ax.xaxis.get_ticklabels(minor=True):
label.set_rotation(30)
label.set_fontsize(10)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(30)
label.set_fontsize(10)
ax.legend(loc='upper right')
# draw the grid
ax.grid(True, which='major', color='grey', linestyle='dashed')
ax.grid(True, which='minor', color='grey', linestyle='dashed')
# fig.autofmt_xdate()
# plt.show()
MKdir_gr(t)
fig.savefig('graphics/'+ str(t.tm_mday)+"-"+str(t.tm_mon)+"-"+str(t.tm_year)+'/share price of company '+str1+ '.pdf',format = 'pdf',dpi=1000)
fig.clf()
# all of these magic numbers should be exposed as control parameters
last_price = my_single_average(y_pred[-4:]) #y_pred[-3:-2] #
av_Y = my_single_average(Y[-(test_col):])
# compute the Gaussian probability
if abs(ma) > abs(av_Y):
P = Gauss_probability(0.1,abs(ma),accuracy,mde)
else:
P = Gauss_probability(abs(1-abs(ma/av_Y)),abs(ma),accuracy,mde)
# print the results
print(str1 +": predicted price change of %.3f%% at %d:%d:%d, probability: %.3f%% " % (last_price,time_interval[-3:-2]['hour'],time_interval[-3:-2]['minute'],time_interval[-3:-2]['sec'], P * 100) )
#######################################################
def boosting_solver(Mass_df,str1,delta_t,t,param_points,param_model,n,m,check_drow,cross_working):
# Mass_df[str1]['diff_price'] = Mass_df[str1]['Main Price'].diff() # price change
Mass_df[str1]['procent_diff'] = ( Mass_df[str1]['Main Price'] - Mass_df[str1]['Prev Close'] ) / Mass_df[str1]['Prev Close'] *100
PC = Mass_df[str1]['Prev Close'][1]
# print(Mass_df[str1].columns)
X = Mass_df[str1][['hour','minute','sec']][n-1:].values # convert the DataFrame slice to a NumPy array
# rolling (moving) average
rolling_mean = Mass_df[str1]['procent_diff'].rolling(window=n).mean()
Y = rolling_mean[n-1:].values
test_col= param_points['test_col']
point_pred = param_points['point_pred']
train_point_print = param_points['train_point_print']
col_p = point_pred + test_col + train_point_print
# split X and Y into training and test sets
X_train = X[:-test_col]
y_train = Y[:-test_col]
model = xgboost.XGB
|
t(facecolor='white', alpha=0.7), transform=ax.transAxes, fontsize=12)
ax.plot(time_interval1, y_pred, 'r-', label="predict", linewidth=2)
ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo--', label="averaged sample", linewidth=1)
# ax.plot(time_interval1[:-point_pred], Y[-(test_col + train_point_print):], 'bo', label="test", linewidth=2)
ax.plot(time_interval1[:-point_pred],rolling_mean[-(test_col + train_point_print):])
plt.axvline(x=time_interval1[train_point_print - 1], color='k', linestyle='--', label='bound_train', linewidth=2)
plt.axvline(x=time_interval1[test_col + train_point_print - 1], color='g', linestyle='--', label='bound_test',
linewidth=2)
# plt.axhline(y=0, color='m', linestyle='--', label='zero', linewidth=2)
def price(x):
return "%"+"%.5f" % x
ax.set_ylabel('percent price change')
ax.set_xlabel('time (h:m:s)')
ax.format_ydata = price
|
conditional_block
|
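The my_finance_module record above builds its regression target from the percent change of the price against the previous close, smooths it with an n-point rolling mean, and fits a gradient-boosted model using only the hour/minute/second columns as features. A minimal sketch of that pipeline follows; the column names mirror the record, while the window size, test split, regressor class (XGBRegressor) and its hyper-parameters are illustrative assumptions, since the record itself is cut off at model = xgboost.XGB.

import pandas as pd
import xgboost

def fit_price_model(df: pd.DataFrame, window: int = 5, test_col: int = 30):
    # Percent change of the current price relative to the previous close.
    df['procent_diff'] = (df['Main Price'] - df['Prev Close']) / df['Prev Close'] * 100
    # Smooth the noisy target with a rolling mean before training.
    target = df['procent_diff'].rolling(window=window).mean()[window - 1:].values
    # Time of day is the only feature set used by the record above.
    features = df[['hour', 'minute', 'sec']][window - 1:].values
    # Hold out the last `test_col` points for evaluation.
    X_train, y_train = features[:-test_col], target[:-test_col]
    model = xgboost.XGBRegressor(n_estimators=200, max_depth=3)  # assumed settings
    model.fit(X_train, y_train)
    return model, model.predict(features)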
ante.go
|
(tx StdTx) StdFee {
return NewStdFee(txparam.DefaultMsgGas*uint64(len(tx.Msgs)), tx.Fee.GasPrice)
}
// NewAnteHandler returns an AnteHandler that checks and increments sequence
// numbers, checks signatures & account numbers, and deducts fees from the first
// signer.
func NewAnteHandler(ak AccountKeeper, fck FeeCollectionKeeper) sdk.AnteHandler {
return func(
ctx sdk.Context, tx sdk.Tx, simulate bool,
) (newCtx sdk.Context, res sdk.Result, abort bool) {
// all transactions must be of type auth.StdTx
stdTx, ok := tx.(StdTx)
log.Debugln("NewAnteHandler:tx", tx)
log.Debugln("NewAnteHandler:stdTx.Msgs", stdTx.Msgs)
log.Debugln("NewAnteHandler:stdTx.Memo", stdTx.Memo)
log.Debugln("NewAnteHandler:stdTx.Fee.Amount", stdTx.Fee.Amount())
log.Debugln("NewAnteHandler:stdTx.Fee.GasWanted", stdTx.Fee.GasWanted)
log.Debugln("NewAnteHandler:stdTx.Fee.GasPrices", stdTx.Fee.GasPrice)
log.Debugln("NewAnteHandler:stdTx.Fee", stdTx.Fee)
if !ok {
// Set a gas meter with limit 0 so as to prevent an infinite gas meter attack
// during runTx.
newCtx = SetGasMeter(simulate, ctx, 0)
return newCtx, sdk.ErrInternal("tx must be StdTx").Result(), true
}
params := ak.GetParams(ctx)
// Ensure that the provided fees meet a minimum threshold for the validator,
// if this is a CheckTx. This is only for local mempool purposes, and thus
// is only run on CheckTx.
// junying-todo, 2019-11-07
// Check if Fee.Amount > Fee.Gas * minGasPrice or not
// It can be rephrased as Fee.GasPrices() > minGasPrice or not?
if ctx.IsCheckTx() && !simulate {
res := EnsureSufficientMempoolFees(ctx, stdTx.Fee)
if !res.IsOK() {
return newCtx, res, true
}
}
newCtx = SetGasMeter(simulate, ctx, stdTx.Fee.GasWanted)
// AnteHandlers must have their own defer/recover in order for the BaseApp
// to know how much gas was used! This is because the GasMeter is created in
// the AnteHandler, but if it panics the context won't be set properly in
// runTx's recover call.
// junying-todo, 2019-08-27
// conventional gas metering isn't necessary anymore
// evm will replace it.
// junying-todo, 2019-10-24
// this is enabled again in order to handle non-htdfservice txs.
defer func() {
if r := recover(); r != nil {
switch rType := r.(type) {
case sdk.ErrorOutOfGas:
log := fmt.Sprintf(
"out of gas in location: %v; gasWanted: %d, gasUsed: %d",
rType.Descriptor, stdTx.Fee.GasWanted, newCtx.GasMeter().GasConsumed(),
)
res = sdk.ErrOutOfGas(log).Result()
res.GasWanted = stdTx.Fee.GasWanted
res.GasUsed = newCtx.GasMeter().GasConsumed()
abort = true
default:
panic(r)
}
}
}()
// junying-todo, 2019-11-13
// planned to be moved to baseapp.ValidateTx by
if err := tx.ValidateBasic(); err != nil {
return newCtx, err.Result(), true
}
// junying-todo, 2019-11-13
// check gas,gasprice for non-genesis block
if err := stdTx.ValidateFee(); err != nil && ctx.BlockHeight() != 0 {
return newCtx, err.Result(), true
}
// junying-todo, 2019-08-27
// conventional gas consuming isn't necessary anymore
// evm will replace it.
// junying-todo, 2019-10-24
// this is enabled again in order to handle non-htdfservice txs.
// junying-todo, 2019-11-13
// GasMetering Disabled, Now Constant Gas used for Staking Txs
if !ExistsMsgSend(tx) {
newCtx.GasMeter().UseGas(sdk.Gas(txparam.DefaultMsgGas*uint64(len(stdTx.Msgs))), "AnteHandler")
}
if res := ValidateMemo(stdTx, params); !res.IsOK() {
return newCtx, res, true
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
signerAddrs := stdTx.GetSigners()
signerAccs := make([]Account, len(signerAddrs))
isGenesis := ctx.BlockHeight() == 0
// fetch first signer, who's going to pay the fees
signerAccs[0], res = GetSignerAcc(newCtx, ak, signerAddrs[0])
if !res.IsOK() {
return newCtx, res, true
}
// junying-todo, 2019-11-19
// Deduct(DefaultMsgGas * len(Msgs)) for non-htdfservice msgs
if !stdTx.Fee.Amount().IsZero() && !ExistsMsgSend(tx) {
estimatedFee := EstimateFee(stdTx)
signerAccs[0], res = DeductFees(ctx.BlockHeader().Time, signerAccs[0], estimatedFee)
if !res.IsOK() {
return newCtx, res, true
}
fck.AddCollectedFees(newCtx, estimatedFee.Amount())
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
stdSigs := stdTx.GetSignatures()
for i := 0; i < len(stdSigs); i++ {
// skip the fee payer, account is cached and fees were deducted already
if i != 0 {
signerAccs[i], res = GetSignerAcc(newCtx, ak, signerAddrs[i])
if !res.IsOK() {
return newCtx, res, true
}
}
log.Debugln("&&&&&&&&&&&&&&&&&&&&", newCtx.ChainID())
// check signature, return account with incremented nonce
signBytes := GetSignBytes(newCtx.ChainID(), stdTx, signerAccs[i], isGenesis)
signerAccs[i], res = processSig(newCtx, signerAccs[i], stdSigs[i], signBytes, simulate, params)
if !res.IsOK() {
return newCtx, res, true
}
ak.SetAccount(newCtx, signerAccs[i])
}
// TODO: tx tags (?)
log.Debugln("NewAnteHandler:FINISHED")
return newCtx, sdk.Result{GasWanted: stdTx.Fee.GasWanted}, false //, GasUsed: newCtx.GasMeter().GasConsumed()}, false // continue...
}
}
// GetSignerAcc returns an account for a given address that is expected to sign
// a transaction.
func GetSignerAcc(ctx sdk.Context, ak AccountKeeper, addr sdk.AccAddress) (Account, sdk.Result) {
if acc := ak.GetAccount(ctx, addr); acc != nil {
return acc, sdk.Result{}
}
return nil, sdk.ErrUnknownAddress(fmt.Sprintf("account %s does not exist", addr)).Result()
}
// ValidateMemo validates the memo size.
func ValidateMemo(stdTx StdTx, params Params) sdk.Result {
memoLength := len(stdTx.GetMemo())
if uint64(memoLength) > params.MaxMemoCharacters {
return sdk.ErrMemoTooLarge(
fmt.Sprintf(
"maximum number of characters is %d but received %d characters",
params.MaxMemoCharacters, memoLength,
),
).Result()
}
return sdk.Result{}
}
// verify the signature and increment the sequence. If the account doesn't have
// a pubkey, set it.
func processSig(
ctx sdk.Context, acc Account, sig StdSignature, signBytes []byte, simulate bool, params Params,
) (updatedAcc Account, res sdk.Result) {
pubKey, res := ProcessPubKey(acc, sig, simulate)
if !res.IsOK() {
|
EstimateFee
|
identifier_name
|
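The mempool check described in the comments above ('Check if Fee.Amount > Fee.Gas * minGasPrice') amounts to requiring that the offered fee cover the requested gas at the validator's minimum gas price. A tiny illustrative check with made-up numbers follows; the actual EnsureSufficientMempoolFees works on the SDK's coin and gas-price types, so this only sketches the arithmetic and treats the bound as inclusive.

def sufficient_mempool_fee(fee_amount: int, gas_wanted: int, min_gas_price: int) -> bool:
    # A tx is admitted to the local mempool only if its fee covers
    # gas_wanted units of gas at the validator's minimum gas price.
    return fee_amount >= gas_wanted * min_gas_price

# 30000 gas at a minimum price of 100 needs a fee of at least 3000000.
assert sufficient_mempool_fee(3000000, 30000, 100)
assert not sufficient_mempool_fee(2999999, 30000, 100)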
|
ante.go
|
Tx.Fee.GasPrices", stdTx.Fee.GasPrice)
log.Debugln("NewAnteHandler:stdTx.Fee", stdTx.Fee)
if !ok {
// Set a gas meter with limit 0 so as to prevent an infinite gas meter attack
// during runTx.
newCtx = SetGasMeter(simulate, ctx, 0)
return newCtx, sdk.ErrInternal("tx must be StdTx").Result(), true
}
params := ak.GetParams(ctx)
// Ensure that the provided fees meet a minimum threshold for the validator,
// if this is a CheckTx. This is only for local mempool purposes, and thus
// is only run on CheckTx.
// junying-todo, 2019-11-07
// Check if Fee.Amount > Fee.Gas * minGasPrice or not
// It can be rephrased as Fee.GasPrices() > minGasPrice or not?
if ctx.IsCheckTx() && !simulate {
res := EnsureSufficientMempoolFees(ctx, stdTx.Fee)
if !res.IsOK() {
return newCtx, res, true
}
}
newCtx = SetGasMeter(simulate, ctx, stdTx.Fee.GasWanted)
// AnteHandlers must have their own defer/recover in order for the BaseApp
// to know how much gas was used! This is because the GasMeter is created in
// the AnteHandler, but if it panics the context won't be set properly in
// runTx's recover call.
// junying-todo, 2019-08-27
// conventional gas metering isn't necessary anymore
// evm will replace it.
// junying-todo, 2019-10-24
// this is enabled again in order to handle non-htdfservice txs.
defer func() {
if r := recover(); r != nil {
switch rType := r.(type) {
case sdk.ErrorOutOfGas:
log := fmt.Sprintf(
"out of gas in location: %v; gasWanted: %d, gasUsed: %d",
rType.Descriptor, stdTx.Fee.GasWanted, newCtx.GasMeter().GasConsumed(),
)
res = sdk.ErrOutOfGas(log).Result()
res.GasWanted = stdTx.Fee.GasWanted
res.GasUsed = newCtx.GasMeter().GasConsumed()
abort = true
default:
panic(r)
}
}
}()
// junying-todo, 2019-11-13
// planned to be moved to baseapp.ValidateTx by
if err := tx.ValidateBasic(); err != nil {
return newCtx, err.Result(), true
}
// junying-todo, 2019-11-13
// check gas,gasprice for non-genesis block
if err := stdTx.ValidateFee(); err != nil && ctx.BlockHeight() != 0 {
return newCtx, err.Result(), true
}
// junying-todo, 2019-08-27
// conventional gas consuming isn't necessary anymore
// evm will replace it.
// junying-todo, 2019-10-24
// this is enabled again in order to handle non-htdfservice txs.
// junying-todo, 2019-11-13
// GasMetering Disabled, Now Constant Gas used for Staking Txs
if !ExistsMsgSend(tx)
|
if res := ValidateMemo(stdTx, params); !res.IsOK() {
return newCtx, res, true
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
signerAddrs := stdTx.GetSigners()
signerAccs := make([]Account, len(signerAddrs))
isGenesis := ctx.BlockHeight() == 0
// fetch first signer, who's going to pay the fees
signerAccs[0], res = GetSignerAcc(newCtx, ak, signerAddrs[0])
if !res.IsOK() {
return newCtx, res, true
}
// junying-todo, 2019-11-19
// Deduct(DefaultMsgGas * len(Msgs)) for non-htdfservice msgs
if !stdTx.Fee.Amount().IsZero() && !ExistsMsgSend(tx) {
estimatedFee := EstimateFee(stdTx)
signerAccs[0], res = DeductFees(ctx.BlockHeader().Time, signerAccs[0], estimatedFee)
if !res.IsOK() {
return newCtx, res, true
}
fck.AddCollectedFees(newCtx, estimatedFee.Amount())
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
stdSigs := stdTx.GetSignatures()
for i := 0; i < len(stdSigs); i++ {
// skip the fee payer, account is cached and fees were deducted already
if i != 0 {
signerAccs[i], res = GetSignerAcc(newCtx, ak, signerAddrs[i])
if !res.IsOK() {
return newCtx, res, true
}
}
log.Debugln("&&&&&&&&&&&&&&&&&&&&", newCtx.ChainID())
// check signature, return account with incremented nonce
signBytes := GetSignBytes(newCtx.ChainID(), stdTx, signerAccs[i], isGenesis)
signerAccs[i], res = processSig(newCtx, signerAccs[i], stdSigs[i], signBytes, simulate, params)
if !res.IsOK() {
return newCtx, res, true
}
ak.SetAccount(newCtx, signerAccs[i])
}
// TODO: tx tags (?)
log.Debugln("NewAnteHandler:FINISHED")
return newCtx, sdk.Result{GasWanted: stdTx.Fee.GasWanted}, false //, GasUsed: newCtx.GasMeter().GasConsumed()}, false // continue...
}
}
// GetSignerAcc returns an account for a given address that is expected to sign
// a transaction.
func GetSignerAcc(ctx sdk.Context, ak AccountKeeper, addr sdk.AccAddress) (Account, sdk.Result) {
if acc := ak.GetAccount(ctx, addr); acc != nil {
return acc, sdk.Result{}
}
return nil, sdk.ErrUnknownAddress(fmt.Sprintf("account %s does not exist", addr)).Result()
}
// ValidateMemo validates the memo size.
func ValidateMemo(stdTx StdTx, params Params) sdk.Result {
memoLength := len(stdTx.GetMemo())
if uint64(memoLength) > params.MaxMemoCharacters {
return sdk.ErrMemoTooLarge(
fmt.Sprintf(
"maximum number of characters is %d but received %d characters",
params.MaxMemoCharacters, memoLength,
),
).Result()
}
return sdk.Result{}
}
// verify the signature and increment the sequence. If the account doesn't have
// a pubkey, set it.
func processSig(
ctx sdk.Context, acc Account, sig StdSignature, signBytes []byte, simulate bool, params Params,
) (updatedAcc Account, res sdk.Result) {
pubKey, res := ProcessPubKey(acc, sig, simulate)
if !res.IsOK() {
return nil, res
}
err := acc.SetPubKey(pubKey)
if err != nil {
return nil, sdk.ErrInternal("setting PubKey on signer's account").Result()
}
if simulate {
// Simulated txs should not contain a signature and are not required to
// contain a pubkey, so we must account for tx size of including a
// StdSignature (Amino encoding) and simulate gas consumption
// (assuming a SECP256k1 simulation key).
consumeSimSigGas(ctx.GasMeter(), pubKey, sig, params)
// log.Debugln("NewAnteHandler.processSig:simulated in")
}
if res := consumeSigVerificationGas(ctx.GasMeter(), sig.Signature, pubKey, params); !res.IsOK() {
return nil, res
}
if !simulate && !pubKey.VerifyBytes(signBytes, sig.Signature) {
return nil, sdk.ErrUnauthorized("signature verification failed; verify correct account number, account sequence and/or chain-id").Result()
}
if err := acc.SetSequence(acc.GetSequence() + 1); err != nil {
panic(err)
}
return acc, res
}
func
|
{
newCtx.GasMeter().UseGas(sdk.Gas(txparam.DefaultMsgGas*uint64(len(stdTx.Msgs))), "AnteHandler")
}
|
conditional_block
|
ante.go
|
stdTx.Fee.GasPrices", stdTx.Fee.GasPrice)
log.Debugln("NewAnteHandler:stdTx.Fee", stdTx.Fee)
if !ok {
// Set a gas meter with limit 0 so as to prevent an infinite gas meter attack
// during runTx.
newCtx = SetGasMeter(simulate, ctx, 0)
return newCtx, sdk.ErrInternal("tx must be StdTx").Result(), true
|
params := ak.GetParams(ctx)
// Ensure that the provided fees meet a minimum threshold for the validator,
// if this is a CheckTx. This is only for local mempool purposes, and thus
// is only run on CheckTx.
// junying-todo, 2019-11-07
// Check if Fee.Amount > Fee.Gas * minGasPrice or not
// It can be rephrased as Fee.GasPrices() > minGasPrice or not?
if ctx.IsCheckTx() && !simulate {
res := EnsureSufficientMempoolFees(ctx, stdTx.Fee)
if !res.IsOK() {
return newCtx, res, true
}
}
newCtx = SetGasMeter(simulate, ctx, stdTx.Fee.GasWanted)
// AnteHandlers must have their own defer/recover in order for the BaseApp
// to know how much gas was used! This is because the GasMeter is created in
// the AnteHandler, but if it panics the context won't be set properly in
// runTx's recover call.
// junying-todo, 2019-08-27
// conventional gas metering isn't necessary anymore
// evm will replace it.
// junying-todo, 2019-10-24
// this is enabled again in order to handle non-htdfservice txs.
defer func() {
if r := recover(); r != nil {
switch rType := r.(type) {
case sdk.ErrorOutOfGas:
log := fmt.Sprintf(
"out of gas in location: %v; gasWanted: %d, gasUsed: %d",
rType.Descriptor, stdTx.Fee.GasWanted, newCtx.GasMeter().GasConsumed(),
)
res = sdk.ErrOutOfGas(log).Result()
res.GasWanted = stdTx.Fee.GasWanted
res.GasUsed = newCtx.GasMeter().GasConsumed()
abort = true
default:
panic(r)
}
}
}()
// junying-todo, 2019-11-13
// planned to be moved to baseapp.ValidateTx by
if err := tx.ValidateBasic(); err != nil {
return newCtx, err.Result(), true
}
// junying-todo, 2019-11-13
// check gas,gasprice for non-genesis block
if err := stdTx.ValidateFee(); err != nil && ctx.BlockHeight() != 0 {
return newCtx, err.Result(), true
}
// junying-todo, 2019-08-27
// conventional gas consuming isn't necessary anymore
// evm will replace it.
// junying-todo, 2019-10-24
// this is enabled again in order to handle non-htdfservice txs.
// junying-todo, 2019-11-13
// GasMetering Disabled, Now Constant Gas used for Staking Txs
if !ExistsMsgSend(tx) {
newCtx.GasMeter().UseGas(sdk.Gas(txparam.DefaultMsgGas*uint64(len(stdTx.Msgs))), "AnteHandler")
}
if res := ValidateMemo(stdTx, params); !res.IsOK() {
return newCtx, res, true
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
signerAddrs := stdTx.GetSigners()
signerAccs := make([]Account, len(signerAddrs))
isGenesis := ctx.BlockHeight() == 0
// fetch first signer, who's going to pay the fees
signerAccs[0], res = GetSignerAcc(newCtx, ak, signerAddrs[0])
if !res.IsOK() {
return newCtx, res, true
}
// junying-todo, 2019-11-19
// Deduct(DefaultMsgGas * len(Msgs)) for non-htdfservice msgs
if !stdTx.Fee.Amount().IsZero() && !ExistsMsgSend(tx) {
estimatedFee := EstimateFee(stdTx)
signerAccs[0], res = DeductFees(ctx.BlockHeader().Time, signerAccs[0], estimatedFee)
if !res.IsOK() {
return newCtx, res, true
}
fck.AddCollectedFees(newCtx, estimatedFee.Amount())
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
stdSigs := stdTx.GetSignatures()
for i := 0; i < len(stdSigs); i++ {
// skip the fee payer, account is cached and fees were deducted already
if i != 0 {
signerAccs[i], res = GetSignerAcc(newCtx, ak, signerAddrs[i])
if !res.IsOK() {
return newCtx, res, true
}
}
log.Debugln("&&&&&&&&&&&&&&&&&&&&", newCtx.ChainID())
// check signature, return account with incremented nonce
signBytes := GetSignBytes(newCtx.ChainID(), stdTx, signerAccs[i], isGenesis)
signerAccs[i], res = processSig(newCtx, signerAccs[i], stdSigs[i], signBytes, simulate, params)
if !res.IsOK() {
return newCtx, res, true
}
ak.SetAccount(newCtx, signerAccs[i])
}
// TODO: tx tags (?)
log.Debugln("NewAnteHandler:FINISHED")
return newCtx, sdk.Result{GasWanted: stdTx.Fee.GasWanted}, false //, GasUsed: newCtx.GasMeter().GasConsumed()}, false // continue...
}
}
// GetSignerAcc returns an account for a given address that is expected to sign
// a transaction.
func GetSignerAcc(ctx sdk.Context, ak AccountKeeper, addr sdk.AccAddress) (Account, sdk.Result) {
if acc := ak.GetAccount(ctx, addr); acc != nil {
return acc, sdk.Result{}
}
return nil, sdk.ErrUnknownAddress(fmt.Sprintf("account %s does not exist", addr)).Result()
}
// ValidateMemo validates the memo size.
func ValidateMemo(stdTx StdTx, params Params) sdk.Result {
memoLength := len(stdTx.GetMemo())
if uint64(memoLength) > params.MaxMemoCharacters {
return sdk.ErrMemoTooLarge(
fmt.Sprintf(
"maximum number of characters is %d but received %d characters",
params.MaxMemoCharacters, memoLength,
),
).Result()
}
return sdk.Result{}
}
// verify the signature and increment the sequence. If the account doesn't have
// a pubkey, set it.
func processSig(
ctx sdk.Context, acc Account, sig StdSignature, signBytes []byte, simulate bool, params Params,
) (updatedAcc Account, res sdk.Result) {
pubKey, res := ProcessPubKey(acc, sig, simulate)
if !res.IsOK() {
return nil, res
}
err := acc.SetPubKey(pubKey)
if err != nil {
return nil, sdk.ErrInternal("setting PubKey on signer's account").Result()
}
if simulate {
// Simulated txs should not contain a signature and are not required to
// contain a pubkey, so we must account for tx size of including a
// StdSignature (Amino encoding) and simulate gas consumption
// (assuming a SECP256k1 simulation key).
consumeSimSigGas(ctx.GasMeter(), pubKey, sig, params)
// log.Debugln("NewAnteHandler.processSig:simulated in")
}
if res := consumeSigVerificationGas(ctx.GasMeter(), sig.Signature, pubKey, params); !res.IsOK() {
return nil, res
}
if !simulate && !pubKey.VerifyBytes(signBytes, sig.Signature) {
return nil, sdk.ErrUnauthorized("signature verification failed; verify correct account number, account sequence and/or chain-id").Result()
}
if err := acc.SetSequence(acc.GetSequence() + 1); err != nil {
panic(err)
}
return acc, res
}
func consume
|
}
|
random_line_split
|
ante.go
|
019-10-24
// this is enabled again in order to handle non-htdfservice txs.
defer func() {
if r := recover(); r != nil {
switch rType := r.(type) {
case sdk.ErrorOutOfGas:
log := fmt.Sprintf(
"out of gas in location: %v; gasWanted: %d, gasUsed: %d",
rType.Descriptor, stdTx.Fee.GasWanted, newCtx.GasMeter().GasConsumed(),
)
res = sdk.ErrOutOfGas(log).Result()
res.GasWanted = stdTx.Fee.GasWanted
res.GasUsed = newCtx.GasMeter().GasConsumed()
abort = true
default:
panic(r)
}
}
}()
// junying-todo, 2019-11-13
// planned to be moved to baseapp.ValidateTx by
if err := tx.ValidateBasic(); err != nil {
return newCtx, err.Result(), true
}
// junying-todo, 2019-11-13
// check gas,gasprice for non-genesis block
if err := stdTx.ValidateFee(); err != nil && ctx.BlockHeight() != 0 {
return newCtx, err.Result(), true
}
// junying-todo, 2019-08-27
// conventional gas consuming isn't necessary anymore
// evm will replace it.
// junying-todo, 2019-10-24
// this is enabled again in order to handle non-htdfservice txs.
// junying-todo, 2019-11-13
// GasMetering Disabled, Now Constant Gas used for Staking Txs
if !ExistsMsgSend(tx) {
newCtx.GasMeter().UseGas(sdk.Gas(txparam.DefaultMsgGas*uint64(len(stdTx.Msgs))), "AnteHandler")
}
if res := ValidateMemo(stdTx, params); !res.IsOK() {
return newCtx, res, true
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
signerAddrs := stdTx.GetSigners()
signerAccs := make([]Account, len(signerAddrs))
isGenesis := ctx.BlockHeight() == 0
// fetch first signer, who's going to pay the fees
signerAccs[0], res = GetSignerAcc(newCtx, ak, signerAddrs[0])
if !res.IsOK() {
return newCtx, res, true
}
// junying-todo, 2019-11-19
// Deduct(DefaultMsgGas * len(Msgs)) for non-htdfservice msgs
if !stdTx.Fee.Amount().IsZero() && !ExistsMsgSend(tx) {
estimatedFee := EstimateFee(stdTx)
signerAccs[0], res = DeductFees(ctx.BlockHeader().Time, signerAccs[0], estimatedFee)
if !res.IsOK() {
return newCtx, res, true
}
fck.AddCollectedFees(newCtx, estimatedFee.Amount())
}
// stdSigs contains the sequence number, account number, and signatures.
// When simulating, this would just be a 0-length slice.
stdSigs := stdTx.GetSignatures()
for i := 0; i < len(stdSigs); i++ {
// skip the fee payer, account is cached and fees were deducted already
if i != 0 {
signerAccs[i], res = GetSignerAcc(newCtx, ak, signerAddrs[i])
if !res.IsOK() {
return newCtx, res, true
}
}
log.Debugln("&&&&&&&&&&&&&&&&&&&&", newCtx.ChainID())
// check signature, return account with incremented nonce
signBytes := GetSignBytes(newCtx.ChainID(), stdTx, signerAccs[i], isGenesis)
signerAccs[i], res = processSig(newCtx, signerAccs[i], stdSigs[i], signBytes, simulate, params)
if !res.IsOK() {
return newCtx, res, true
}
ak.SetAccount(newCtx, signerAccs[i])
}
// TODO: tx tags (?)
log.Debugln("NewAnteHandler:FINISHED")
return newCtx, sdk.Result{GasWanted: stdTx.Fee.GasWanted}, false //, GasUsed: newCtx.GasMeter().GasConsumed()}, false // continue...
}
}
// GetSignerAcc returns an account for a given address that is expected to sign
// a transaction.
func GetSignerAcc(ctx sdk.Context, ak AccountKeeper, addr sdk.AccAddress) (Account, sdk.Result) {
if acc := ak.GetAccount(ctx, addr); acc != nil {
return acc, sdk.Result{}
}
return nil, sdk.ErrUnknownAddress(fmt.Sprintf("account %s does not exist", addr)).Result()
}
// ValidateMemo validates the memo size.
func ValidateMemo(stdTx StdTx, params Params) sdk.Result {
memoLength := len(stdTx.GetMemo())
if uint64(memoLength) > params.MaxMemoCharacters {
return sdk.ErrMemoTooLarge(
fmt.Sprintf(
"maximum number of characters is %d but received %d characters",
params.MaxMemoCharacters, memoLength,
),
).Result()
}
return sdk.Result{}
}
// verify the signature and increment the sequence. If the account doesn't have
// a pubkey, set it.
func processSig(
ctx sdk.Context, acc Account, sig StdSignature, signBytes []byte, simulate bool, params Params,
) (updatedAcc Account, res sdk.Result) {
pubKey, res := ProcessPubKey(acc, sig, simulate)
if !res.IsOK() {
return nil, res
}
err := acc.SetPubKey(pubKey)
if err != nil {
return nil, sdk.ErrInternal("setting PubKey on signer's account").Result()
}
if simulate {
// Simulated txs should not contain a signature and are not required to
// contain a pubkey, so we must account for tx size of including a
// StdSignature (Amino encoding) and simulate gas consumption
// (assuming a SECP256k1 simulation key).
consumeSimSigGas(ctx.GasMeter(), pubKey, sig, params)
// log.Debugln("NewAnteHandler.processSig:simulated in")
}
if res := consumeSigVerificationGas(ctx.GasMeter(), sig.Signature, pubKey, params); !res.IsOK() {
return nil, res
}
if !simulate && !pubKey.VerifyBytes(signBytes, sig.Signature) {
return nil, sdk.ErrUnauthorized("signature verification failed; verify correct account number, account sequence and/or chain-id").Result()
}
if err := acc.SetSequence(acc.GetSequence() + 1); err != nil {
panic(err)
}
return acc, res
}
func consumeSimSigGas(gasmeter sdk.GasMeter, pubkey crypto.PubKey, sig StdSignature, params Params) {
simSig := StdSignature{PubKey: pubkey}
if len(sig.Signature) == 0 {
simSig.Signature = simSecp256k1Sig[:]
}
sigBz := msgCdc.MustMarshalBinaryLengthPrefixed(simSig)
cost := sdk.Gas(len(sigBz) + 6)
// If the pubkey is a multi-signature pubkey, then we estimate for the maximum
// number of signers.
if _, ok := pubkey.(multisig.PubKeyMultisigThreshold); ok {
cost *= params.TxSigLimit
}
gasmeter.ConsumeGas(params.TxSizeCostPerByte*cost, "txSize")
}
// ProcessPubKey verifies that the given account address matches that of the
// StdSignature. In addition, it will set the public key of the account if it
// has not been set.
func ProcessPubKey(acc Account, sig StdSignature, simulate bool) (crypto.PubKey, sdk.Result)
|
{
// If pubkey is not known for account, set it from the StdSignature.
pubKey := acc.GetPubKey()
if simulate {
// In simulate mode the transaction comes with no signatures, thus if the
// account's pubkey is nil, both signature verification and gasKVStore.Set()
// shall consume the largest amount, i.e. it takes more gas to verify
// secp256k1 keys than ed25519 ones.
if pubKey == nil {
return simSecp256k1Pubkey, sdk.Result{}
}
return pubKey, sdk.Result{}
}
if pubKey == nil {
pubKey = sig.PubKey
if pubKey == nil {
return nil, sdk.ErrInvalidPubKey("PubKey not found").Result()
}
|
identifier_body
|
|
main.py
|
(self, exc_type, exc_val, exc_tb):
super(MyLife, self).__exit__(exc_type, exc_val, exc_tb)
def get_data(self):
"""
Takes self.url (for a general MyLife search), scrapes the site data, and adds
it to the self.data_from_website DataFrame.
MyLife keeps its full data set on the page for the specific record, so self._gather_deep_data() can be used
to pull that deeper data.
:return: Boolean
"""
def _clean_search_hit(search_hit):
"""
Takes in a search result hit as a BeautifulSoup tag and pulls out all the data to match the desired schema.
:param search_hit:
:return Dictionary: A dictionary with the cleaned data
"""
hit_name = search_hit.find(class_='hit-name')
hit_url = hit_name.get('href')
hit_id = hit_url.split('/')[-1]
name = hit_name.get_text().split(',')[0].title().split()
current_city = search_hit.find(class_='hit-location').get_text().upper()
# Find all Addresses for search result.
try:
address = search_hit.find(class_='hit-pastAddresses').find_all(class_='hit-values')
address = list({a.text.upper().replace('.', '') for a in address})
except AttributeError:
address = list()
# find the address that is most likely the current main address.
try:
address.insert(0, address.pop(address.index(current_city)))
except ValueError:
address.insert(0, current_city)
address = [
{
'@type': 'PostalAddress',
'addressLocality': locality.title(),
'addressRegion': region
} for locality, region in [a.split(', ') for a in address]]
work_location = {'@type': 'Place'}
try:
work_location['name'] = search_hit\
.find(class_='hit-work')\
.find(class_='hit-values')\
.get_text()\
.title()
except AttributeError:
work_location['name'] = ''
alumni_of = {'@type': 'EducationalOrganization'}
try:
alumni_of['name'] = search_hit\
.find(class_='hit-high-school')\
.find(class_='hit-values')\
.get_text().title()
except AttributeError:
pass
return {
'@id': hit_id,
'@type': 'Person',
'name': ' '.join(name),
'givenName': name[0],
'middleName': ' '.join(name[1:-1]),
'familyName': name[-1],
'url': hit_url,
'address': address,
'workLocation': work_location,
'alumniOf': alumni_of,
}
def _refine_search(search_str, options):
"""
Takes a list of WebElements and a search string, looks for the string in the text of each WebElement, and
clicks the option if found. Returns a Boolean indicating whether a matching option was found.
:param search_str: str of the desired option.
:param options: list of Selenium WebElements representing all of the available options.
:return:
"""
search_str = search_str.upper()
logging.info(f'Looking for \'{search_str}\'')
try:
for option in options:
option_text = option.text.upper()
logging.info(f'Option Checked: {option_text}')
if search_str in option_text:
option.click()
time.sleep(2)
logging.info(f'Option Selected: {option_text}')
return True
else:
return False
except AttributeError:
return True
except StaleElementReferenceException as e:
ChromeCrash(e)
with self.driver(executable_path=self.DRIVER_DIR) as driver:
driver.get(self.url)
"""
The CSS for the page doesn't show the State or the City selector options if the page is too narrow,
so we need to make sure the browser is open wide enough for the CSS to make those options visible.
"""
driver.fullscreen_window()
# Refine the search by State
address_region = self.person.get('addressRegion', '')
address_region = STATES.get(address_region.upper(), address_region.upper())
region_options = driver\
.find_element_by_class_name("STATE")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_region, region_options):
return False
# Narrow the search by pressing a City option
address_locality = self.person.get('addressLocality').title()
locality_options = driver\
.find_element_by_class_name("CITY")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_locality, locality_options):
return False
"""
The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to
be done in steps with a pause between movements to allow for loading.
Here it will first get the current location on the page, attempt to move down the page, and then check to
see if the location changed.
"""
if self.auto_scroll and len(driver.find_elements_by_class_name("ais-InfiniteHits-item")) > 15:
current_height, new_height = 0, driver.execute_script("return document.body.scrollHeight")
while new_height != current_height:
# Scroll down to the bottom of the page
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
current_height, new_height = new_height, driver.execute_script("return document.body.scrollHeight")
page_source = driver.page_source
page_soup = bs(page_source, 'html.parser')
search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))
for i, search_result in enumerate(search_results):
search_results[i] = _clean_search_hit(search_result)
self.data_from_website = pd.DataFrame(search_results)
self.data_from_website.set_index('@id', inplace=True)
return True
def _deep_data(self, url):
"""
Takes a URL for a specific MyLife record, scrapes the JSON data and returns a dictionary.
:param url: url for which a deeper set of data is to be gathered.
:return:
"""
def _nested_persons(persons):
_persons = list()
for person_ in persons:
person_ = [r.text.split(', ') for r in person_.find_all(class_='default-text')]
person = {'name': person_[0][0].title()}
if len(person_[0]) == 2:
person['age'] = person_[0][1]
if len(person_[1]) > 0:
person['addressLocality'] = person_[1][0].title()
if len(person_[1]) == 2:
person['addressRegion'] = person_[1][1].upper()
_persons.append(person)
return _persons
with self.driver(self.DRIVER_DIR) as driver:
driver.get(url)
driver.fullscreen_window()
time.sleep(2)
txt = driver.page_source
soup = bs(txt, 'html.parser')
profile_data = soup.find(type="application/ld+json")
if profile_data is None:
self._raise_site_schema_change()
profile_data = profile_data.string
profile_data = json.loads(profile_data, strict=False)
profile_data['@id'] = profile_data.pop('@id').split('/')[-1]
try:
about = profile_data.pop('about')
for k, v in about.items():
profile_data[k] = v
except KeyError:
pass
name_ = profile_data.pop('name')
profile_data['name'] = name_
name_ = name_.split()
profile_data['givenName'] = name_[0]
profile_data['middleName'] = ' '.join(name_[1:-1])
profile_data['familyName'] = name_[-1]
if soup.find(class_='rep-vcard-score') is not None:
profile_data['reputation_score'] = "{min}-{max}".format(
min=soup.find(class_='rep-vcard-min').text,
max=soup.find(class_='rep-vcard-max').text
)
address = list()
address_ = soup.find_all(class_='card-address')
for a in address_:
street_address, locality_region_postal, *misc = [_.text for _ in a.find_all(class_='block-container')]
address_locality, locality_region_postal = locality_region_postal.split(',')
address_region, postal_code = locality_region_postal.split()
address.append({
'streetAddress': street_address,
'addressLocality': address_locality,
'addressRegion': address_region,
'postalCode': postal_code,
})
profile_data['address'] = address
personal_details = soup.find(class_='card-personal-details')
if personal_details is not None:
personal_details = personal_details.find_all(class_='item-container')
personal_details = [detail.text.split(': ') for detail in personal_details]
personal_details = [_ for _ in personal_details if len(_) == 2]
personal_details = {detail.lower().replace(' ', '_'): value for
detail, value in personal_details if value != 'Add Info'}
birth_date = personal_details.pop('date_of_birth')
if len(b
|
__exit__
|
identifier_name
|
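The _refine_search helper in the main.py record above is meant to click the first filter option whose text contains the search string. Below is a standalone sketch of that matching loop which only reports failure after every option has been inspected; the two-second pause and the logging mirror the record, while the function name and type hints are illustrative.

import logging
import time

def click_matching_option(search_str: str, options) -> bool:
    # `options` is expected to be a list of Selenium WebElements (the refinement checkboxes).
    search_str = search_str.upper()
    for option in options:
        option_text = option.text.upper()
        logging.info(f'Option checked: {option_text}')
        if search_str in option_text:
            option.click()
            time.sleep(2)  # give the results list time to refresh
            return True
    return False  # no option matched the search string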
|
main.py
|
(self):
"""
Takes self.url (for a general MyLife search), scrapes the site data, and adds
it to the self.data_from_website DataFrame.
MyLife keeps its full data set on the page for the specific record, so self._gather_deep_data() can be used
to pull that deeper data.
:return: Boolean
"""
def _clean_search_hit(search_hit):
"""
Takes in a search result hit as a BeautifulSoup tag and pulls out all the data to match the desired schema.
:param search_hit:
:return Dictionary: A dictionary with the cleaned data
"""
hit_name = search_hit.find(class_='hit-name')
hit_url = hit_name.get('href')
hit_id = hit_url.split('/')[-1]
name = hit_name.get_text().split(',')[0].title().split()
current_city = search_hit.find(class_='hit-location').get_text().upper()
# Find all Addresses for search result.
try:
address = search_hit.find(class_='hit-pastAddresses').find_all(class_='hit-values')
address = list({a.text.upper().replace('.', '') for a in address})
except AttributeError:
address = list()
# find the address that is most likely the current main address.
try:
address.insert(0, address.pop(address.index(current_city)))
except ValueError:
address.insert(0, current_city)
address = [
{
'@type': 'PostalAddress',
'addressLocality': locality.title(),
'addressRegion': region
} for locality, region in [a.split(', ') for a in address]]
work_location = {'@type': 'Place'}
try:
work_location['name'] = search_hit\
.find(class_='hit-work')\
.find(class_='hit-values')\
.get_text()\
.title()
except AttributeError:
work_location['name'] = ''
alumni_of = {'@type': 'EducationalOrganization'}
try:
alumni_of['name'] = search_hit\
.find(class_='hit-high-school')\
.find(class_='hit-values')\
.get_text().title()
except AttributeError:
pass
return {
'@id': hit_id,
'@type': 'Person',
'name': ' '.join(name),
'givenName': name[0],
'middleName': ' '.join(name[1:-1]),
'familyName': name[-1],
'url': hit_url,
'address': address,
'workLocation': work_location,
'alumniOf': alumni_of,
}
def _refine_search(search_str, options):
"""
Takes a list of WebElements and a search string, looks for the string in the text of each WebElement, and
clicks the option if found. Returns a Boolean indicating whether a matching option was found.
:param search_str: str of the desired option.
:param options: list of Selenium WebElements representing all of the available options.
:return:
"""
search_str = search_str.upper()
logging.info(f'Looking for \'{search_str}\'')
try:
for option in options:
option_text = option.text.upper()
logging.info(f'Option Checked: {option_text}')
if search_str in option_text:
option.click()
time.sleep(2)
logging.info(f'Option Selected: {option_text}')
return True
else:
return False
except AttributeError:
return True
except StaleElementReferenceException as e:
ChromeCrash(e)
with self.driver(executable_path=self.DRIVER_DIR) as driver:
driver.get(self.url)
"""
The CSS for the page doesn't show the State or the City selector options if the page is too narrow,
so we need to make sure the browser is open wide enough for the CSS to make those options visible.
"""
driver.fullscreen_window()
# Refine the search by State
address_region = self.person.get('addressRegion', '')
address_region = STATES.get(address_region.upper(), address_region.upper())
region_options = driver\
.find_element_by_class_name("STATE")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_region, region_options):
return False
# Narrow the search by pressing a City option
address_locality = self.person.get('addressLocality').title()
locality_options = driver\
.find_element_by_class_name("CITY")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_locality, locality_options):
return False
"""
The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to
be done in steps with a pause between movements to allow for loading.
Here it will first get the current location on the page, attempt to move down the page, and then check to
see if the location changed.
"""
if self.auto_scroll and len(driver.find_elements_by_class_name("ais-InfiniteHits-item")) > 15:
current_height, new_height = 0, driver.execute_script("return document.body.scrollHeight")
while new_height != current_height:
# Scroll down to the bottom of the page
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
current_height, new_height = new_height, driver.execute_script("return document.body.scrollHeight")
page_source = driver.page_source
page_soup = bs(page_source, 'html.parser')
search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))
for i, search_result in enumerate(search_results):
search_results[i] = _clean_search_hit(search_result)
self.data_from_website = pd.DataFrame(search_results)
self.data_from_website.set_index('@id', inplace=True)
return True
def _deep_data(self, url):
"""
Takes a URL for a specific MyLife record, scrapes the JSON data and returns a dictionary.
:param url: url for which a deeper set of data is to be gathered.
:return:
"""
def _nested_persons(persons):
_persons = list()
for person_ in persons:
person_ = [r.text.split(', ') for r in person_.find_all(class_='default-text')]
person = {'name': person_[0][0].title()}
if len(person_[0]) == 2:
person['age'] = person_[0][1]
if len(person_[1]) > 0:
person['addressLocality'] = person_[1][0].title()
if len(person_[1]) == 2:
person['addressRegion'] = person_[1][1].upper()
_persons.append(person)
return _persons
with self.driver(self.DRIVER_DIR) as driver:
driver.get(url)
driver.fullscreen_window()
time.sleep(2)
txt = driver.page_source
soup = bs(txt, 'html.parser')
profile_data = soup.find(type="application/ld+json")
if profile_data is None:
self._raise_site_schema_change()
profile_data = profile_data.string
profile_data = json.loads(profile_data, strict=False)
profile_data['@id'] = profile_data.pop('@id').split('/')[-1]
try:
about = profile_data.pop('about')
for k, v in about.items():
profile_data[k] = v
except KeyError:
pass
name_ = profile_data.pop('name')
profile_data['name'] = name_
name_ = name_.split()
profile_data['givenName'] = name_[0]
profile_data['middleName'] = ' '.join(name_[1:-1])
profile_data['familyName'] = name_[-1]
if soup.find(class_='rep-vcard-score') is not None:
profile_data['reputation_score'] = "{min}-{max}".format(
min=soup.find(class_='rep-vcard-min').text,
max=soup.find(class_='rep-vcard-max').text
)
address = list()
address_ = soup.find_all(class_='card-address')
for a in address_:
street_address, locality_region_postal, *misc = [_.text for _ in a.find_all(class_='block-container')]
address_locality, locality_region_postal = locality_region_postal.split(',')
address_region, postal_code = locality_region_postal.split()
address.append({
'streetAddress': street_address,
'addressLocality': address_locality,
'addressRegion': address_region,
'postalCode': postal_code,
})
profile_data['address'] = address
personal_details = soup.find(class_='card-personal-details')
if personal_details is not None:
|
personal_details = [_ for _ in personal_details if len(_) == 2]
personal_details = {detail.lower().replace(' ', '_'): value for
detail, value in personal_details if value != 'Add Info'}
birth_date = personal_details.pop('date_of_birth')
if len(birth_date) > 0:
profile_data['birthDate'] = birth_date
for key_, value_ in personal_details.items():
profile_data[key_] = value
|
personal_details = personal_details.find_all(class_='item-container')
personal_details = [detail.text.split(': ') for detail in personal_details]
|
random_line_split
|
main.py
|
(self):
"""
Takes self.url (for a general MyLife search), scrapes the site data, and adds
it to the self.data_from_website DataFrame.
MyLife keeps its full data set on the page for the specific record, so self._gather_deep_data() can be used
to pull that deeper data.
:return: Boolean
"""
def _clean_search_hit(search_hit):
"""
Takes in a search result hit as a BeautifulSoup tag and pulls out all the data to match the desired schema.
:param search_hit:
:return Dictionary: A dictionary with the cleaned data
"""
hit_name = search_hit.find(class_='hit-name')
hit_url = hit_name.get('href')
hit_id = hit_url.split('/')[-1]
name = hit_name.get_text().split(',')[0].title().split()
current_city = search_hit.find(class_='hit-location').get_text().upper()
# Find all Addresses for search result.
try:
address = search_hit.find(class_='hit-pastAddresses').find_all(class_='hit-values')
address = list({a.text.upper().replace('.', '') for a in address})
except AttributeError:
address = list()
# find the address that is most likely the current main address.
try:
address.insert(0, address.pop(address.index(current_city)))
except ValueError:
address.insert(0, current_city)
address = [
{
'@type': 'PostalAddress',
'addressLocality': locality.title(),
'addressRegion': region
} for locality, region in [a.split(', ') for a in address]]
work_location = {'@type': 'Place'}
try:
work_location['name'] = search_hit\
.find(class_='hit-work')\
.find(class_='hit-values')\
.get_text()\
.title()
except AttributeError:
work_location['name'] = ''
alumni_of = {'@type': 'EducationalOrganization'}
try:
alumni_of['name'] = search_hit\
.find(class_='hit-high-school')\
.find(class_='hit-values')\
.get_text().title()
except AttributeError:
pass
return {
'@id': hit_id,
'@type': 'Person',
'name': ' '.join(name),
'givenName': name[0],
'middleName': ' '.join(name[1:-1]),
'familyName': name[-1],
'url': hit_url,
'address': address,
'workLocation': work_location,
'alumniOf': alumni_of,
}
def _refine_search(search_str, options):
"""
Takes a list of WebElements and a search string, looks for the string in the text of each WebElement, and
clicks the option if found. Returns a Boolean indicating whether a matching option was found.
:param search_str: str of the desired option.
:param options: list of Selenium WebElements representing all of the available options.
:return:
"""
search_str = search_str.upper()
logging.info(f'Looking for \'{search_str}\'')
try:
for option in options:
option_text = option.text.upper()
logging.info(f'Option Checked: {option_text}')
if search_str in option_text:
option.click()
time.sleep(2)
logging.info(f'Option Selected: {option_text}')
return True
else:
return False
except AttributeError:
return True
except StaleElementReferenceException as e:
ChromeCrash(e)
with self.driver(executable_path=self.DRIVER_DIR) as driver:
driver.get(self.url)
"""
The CSS for the page doesn't show the State or the City selector options if the page is too narrow,
so we need to make sure the browser is open wide enough for the CSS to make those options visible.
"""
driver.fullscreen_window()
# Refine the search by State
address_region = self.person.get('addressRegion', '')
address_region = STATES.get(address_region.upper(), address_region.upper())
region_options = driver\
.find_element_by_class_name("STATE")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_region, region_options):
return False
# Narrow the search by pressing a City option
address_locality = self.person.get('addressLocality').title()
locality_options = driver\
.find_element_by_class_name("CITY")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_locality, locality_options):
return False
"""
The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to
be done in steps with a pause between movements to allow for loading.
Here it will first get the current location on the page, attempt to move down the page, and then check to
see if the location changed.
"""
if self.auto_scroll and len(driver.find_elements_by_class_name("ais-InfiniteHits-item")) > 15:
current_height, new_height = 0, driver.execute_script("return document.body.scrollHeight")
while new_height != current_height:
# Scroll down to the bottom of the page
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
current_height, new_height = new_height, driver.execute_script("return document.body.scrollHeight")
page_source = driver.page_source
page_soup = bs(page_source, 'html.parser')
search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))
for i, search_result in enumerate(search_results):
search_results[i] = _clean_search_hit(search_result)
self.data_from_website = pd.DataFrame(search_results)
self.data_from_website.set_index('@id', inplace=True)
return True
def _deep_data(self, url):
|
return _persons
with self.driver(self.DRIVER_DIR) as driver:
driver.get(url)
driver.fullscreen_window()
time.sleep(2)
txt = driver.page_source
soup = bs(txt, 'html.parser')
profile_data = soup.find(type="application/ld+json")
if profile_data is None:
self._raise_site_schema_change()
profile_data = profile_data.string
profile_data = json.loads(profile_data, strict=False)
profile_data['@id'] = profile_data.pop('@id').split('/')[-1]
try:
about = profile_data.pop('about')
for k, v in about.items():
profile_data[k] = v
except KeyError:
pass
name_ = profile_data.pop('name')
profile_data['name'] = name_
name_ = name_.split()
profile_data['givenName'] = name_[0]
profile_data['middleName'] = ' '.join(name_[1:-1])
profile_data['familyName'] = name_[-1]
if soup.find(class_='rep-vcard-score') is not None:
profile_data['reputation_score'] = "{min}-{max}".format(
min=soup.find(class_='rep-vcard-min').text,
max=soup.find(class_='rep-vcard-max').text
)
address = list()
address_ = soup.find_all(class_='card-address')
for a in address_:
street_address, locality_region_postal, *misc = [_.text for _ in a.find_all(class_='block-container')]
address_locality, locality_region_postal = locality_region_postal.split(',')
address_region, postal_code = locality_region_postal.split()
address.append({
'streetAddress': street_address,
'addressLocality': address_locality,
'addressRegion': address_region,
'postalCode': postal_code,
})
profile_data['address'] = address
personal_details = soup.find(class_='card-personal-details')
if personal_details is not None:
personal_details = personal_details.find_all(class_='item-container')
personal_details = [detail.text.split(': ') for detail in personal_details]
personal_details = [_ for _ in personal_details if len(_) == 2]
personal_details = {detail.lower().replace(' ', '_'): value for
detail, value in personal_details if value != 'Add Info'}
birth_date = personal_details.pop('date_of_birth')
if len(birth_date) > 0:
profile_data['birthDate'] = birth_date
for key_, value_ in personal_details.items():
profile_data[key_] = value
|
"""
Takes a URL for a specific MyLife record, scrapes the JSON data and returns a dictionary.
:param url: url for which a deeper set of data is to be gathered.
:return:
"""
def _nested_persons(persons):
_persons = list()
for person_ in persons:
person_ = [r.text.split(', ') for r in person_.find_all(class_='default-text')]
person = {'name': person_[0][0].title()}
if len(person_[0]) == 2:
person['age'] = person_[0][1]
if len(person_[1]) > 0:
person['addressLocality'] = person_[1][0].title()
if len(person_[1]) == 2:
person['addressRegion'] = person_[1][1].upper()
_persons.append(person)
|
identifier_body
|
main.py
|
1]),
'familyName': name[-1],
'url': hit_url,
'address': address,
'workLocation': work_location,
'alumniOf': alumni_of,
}
def _refine_search(search_str, options):
"""
Takes a list of WebElements and a search string, looks for the string in the text of each WebElement, and
clicks the option if found. Returns a Boolean indicating whether a matching option was found.
:param search_str: str of the desired option.
:param options: list of Selenium WebElements representing all of the available options.
:return:
"""
search_str = search_str.upper()
logging.info(f'Looking for \'{search_str}\'')
try:
for option in options:
option_text = option.text.upper()
logging.info(f'Option Checked: {option_text}')
if search_str in option_text:
option.click()
time.sleep(2)
logging.info(f'Option Selected: {option_text}')
return True
else:
return False
except AttributeError:
return True
except StaleElementReferenceException as e:
ChromeCrash(e)
with self.driver(executable_path=self.DRIVER_DIR) as driver:
driver.get(self.url)
"""
The CSS for the page doesn't show the State or the City selector options if the page is too narrow,
so we need to make sure the browser is open wide enough for the CSS to make those options visible.
"""
driver.fullscreen_window()
# Refine the search by State
address_region = self.person.get('addressRegion', '')
address_region = STATES.get(address_region.upper(), address_region.upper())
region_options = driver\
.find_element_by_class_name("STATE")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_region, region_options):
return False
# Narrow the search by pressing a City option
address_locality = self.person.get('addressLocality').title()
locality_options = driver\
.find_element_by_class_name("CITY")\
.find_elements_by_class_name("refinementList-text")
if not _refine_search(address_locality, locality_options):
return False
"""
The Page Loads dynamically, so we need to scroll down the page to show all the search results. It needs to
be done in steps with a pause between movements to allow for loading.
Here it will first get the current location on the page, attempt to move down the page, and then check to
see if the location changed.
"""
if self.auto_scroll and len(driver.find_elements_by_class_name("ais-InfiniteHits-item")) > 15:
current_height, new_height = 0, driver.execute_script("return document.body.scrollHeight")
while new_height != current_height:
# Scroll down to the bottom of the page
driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Wait to load page
time.sleep(SCROLL_PAUSE_TIME)
# Calculate new scroll height and compare with last scroll height
current_height, new_height = new_height, driver.execute_script("return document.body.scrollHeight")
page_source = driver.page_source
page_soup = bs(page_source, 'html.parser')
search_results = list(page_soup.find_all(class_='ais-InfiniteHits-item'))
for i, search_result in enumerate(search_results):
search_results[i] = _clean_search_hit(search_result)
self.data_from_website = pd.DataFrame(search_results)
self.data_from_website.set_index('@id', inplace=True)
return True
def _deep_data(self, url):
"""
Takes a URL for a specific MyLife record, scrapes the JSON data and returns a dictionary.
:param url: url for which a deeper set of data is to be gathered.
:return:
"""
def _nested_persons(persons):
_persons = list()
for person_ in persons:
person_ = [r.text.split(', ') for r in person_.find_all(class_='default-text')]
person = {'name': person_[0][0].title()}
if len(person_[0]) == 2:
person['age'] = person_[0][1]
if len(person_[1]) > 0:
person['addressLocality'] = person_[1][0].title()
if len(person_[1]) == 2:
person['addressRegion'] = person_[1][1].upper()
_persons.append(person)
return _persons
with self.driver(self.DRIVER_DIR) as driver:
driver.get(url)
driver.fullscreen_window()
time.sleep(2)
txt = driver.page_source
soup = bs(txt, 'html.parser')
profile_data = soup.find(type="application/ld+json")
if profile_data is None:
self._raise_site_schema_change()
profile_data = profile_data.string
profile_data = json.loads(profile_data, strict=False)
profile_data['@id'] = profile_data.pop('@id').split('/')[-1]
try:
about = profile_data.pop('about')
for k, v in about.items():
profile_data[k] = v
except KeyError:
pass
name_ = profile_data.pop('name')
profile_data['name'] = name_
name_ = name_.split()
profile_data['givenName'] = name_[0]
profile_data['middleName'] = ' '.join(name_[1:-1])
profile_data['familyName'] = name_[-1]
if soup.find(class_='rep-vcard-score') is not None:
profile_data['reputation_score'] = "{min}-{max}".format(
min=soup.find(class_='rep-vcard-min').text,
max=soup.find(class_='rep-vcard-max').text
)
address = list()
address_ = soup.find_all(class_='card-address')
for a in address_:
street_address, locality_region_postal, *misc = [_.text for _ in a.find_all(class_='block-container')]
address_locality, locality_region_postal = locality_region_postal.split(',')
address_region, postal_code = locality_region_postal.split()
address.append({
'streetAddress': street_address,
'addressLocality': address_locality,
'addressRegion': address_region,
'postalCode': postal_code,
})
profile_data['address'] = address
personal_details = soup.find(class_='card-personal-details')
if personal_details is not None:
personal_details = personal_details.find_all(class_='item-container')
personal_details = [detail.text.split(': ') for detail in personal_details]
personal_details = [_ for _ in personal_details if len(_) == 2]
personal_details = {detail.lower().replace(' ', '_'): value for
detail, value in personal_details if value != 'Add Info'}
birth_date = personal_details.pop('date_of_birth')
if len(birth_date) > 0:
profile_data['birthDate'] = birth_date
for key_, value_ in personal_details.items():
profile_data[key_] = value_
# Education
schools_ = soup.find(class_='card-education')
if schools_ is not None:
schools = list()
schools_ = schools_.find_all(class_='card-content')
for school in schools_:
school = [detail.text.split(': ') for detail in school.find_all(class_='item-container')]
school = {detail.lower().replace(' ', '_'): value for
detail, value in school if value != 'Add Info'}
if len(school) == 0:
continue
school['@type'] = 'EducationalOrganization'
school['name'] = school.pop('school')
school['streetAddress'], school['addressLocality'] = school.pop('city').split(', ')
schools.append(school)
if len(schools) > 0:
# NOTE: the original never attached the collected schools; 'alumniOf' is an assumed key name
profile_data['alumniOf'] = schools
# Work
employers = soup.find(class_='card-job')
if employers is not None:
works_for = list()
employers = employers.find_all(class_='card-content')
for employer in employers:
employer = [detail.text.split(': ') for detail in employer.find_all(class_='item-container')]
employer = {detail.lower().replace(' ', '_'): value for
detail, value in employer if value != 'Add Info'}
if len(employer) == 0:
continue
employer['@type'] = 'Organization'
try:
employer['name'] = employer.pop('company')
except KeyError:
continue
if len(employer.get('city', '')) > 0:
employer['streetAddress'], employer['addressLocality'] = employer.pop('city').split(', ')
works_for.append(employer)
if len(works_for) > 0:
profile_data['worksFor'] = works_for
# Automobiles
automobiles = soup.find(class_='card-auto')
if automobiles is not None:
owns = list()
automobiles = automobiles.find_all(class_='card-content')
for automobile in automobiles:
|
automobile = [detail.text.split(': ') for detail in automobile.find_all(class_='item-container')]
automobile = {detail.lower().replace(' ', '_'): value for
detail, value in automobile if value != 'Add Info'}
if len(automobile) == 0:
continue
automobile['@type'] = 'Product'
automobile['model'] = ' '.join([
automobile.pop('year'),
automobile.pop('make'),
automobile.pop('model')
])
owns.append(automobile)
|
conditional_block
|
|
mod.rs
|
is more consistent
// FIXME: Only 64-bit architectures are supported by the below values
data.insert(Type::u8, TypeTableEntry::new(1, 1));
data.insert(Type::u16, TypeTableEntry::new(2, 2));
data.insert(Type::u32, TypeTableEntry::new(4, 4));
data.insert(Type::u64, TypeTableEntry::new(8, 8));
data.insert(Type::u128, TypeTableEntry::new(16, 8));
data.insert(Type::i8, TypeTableEntry::new(1, 1));
data.insert(Type::i16, TypeTableEntry::new(2, 2));
data.insert(Type::i32, TypeTableEntry::new(4, 4));
data.insert(Type::i64, TypeTableEntry::new(8, 8));
data.insert(Type::i128, TypeTableEntry::new(16, 8));
data.insert(Type::f32, TypeTableEntry::new(4, 4));
data.insert(Type::f64, TypeTableEntry::new(8, 8));
data.insert(Type::bool, TypeTableEntry::new(1, 1));
data.insert(Type::Unit, TypeTableEntry::new(0, 1));
Self { data }
}
fn insert(&mut self, t: &Type, entry: TypeTableEntry) -> Result<(), String> {
match self.data.insert(t.clone(), entry) {
Some(_) => Err(format!("Type {} already exists", t)),
None => Ok(()),
}
}
fn assert_valid(&self, t: &Type) -> Result<(), String> {
match t {
// Strip away references to check the underlying type
Type::Reference { ty, .. } => Ok(self.assert_valid(ty)?),
// Check all contained types
Type::Tuple(types) => {
// TODO: All types can be checked (rather than stopping at first error)
// Just store all errors, then build an error string
for ty in types {
let result = self.assert_valid(ty);
if result.is_err() {
return result;
}
}
Ok(())
}
// Base types
_ => {
if self.data.contains_key(t) {
Ok(())
} else {
Err(format!("Type `{}` is not valid", t))
}
}
}
}
/// Returns alignment of the type in bytes
fn alignment_of(&self, t: &Type) -> usize {
match t {
// TODO: Alignment should be same as pointer type
Type::Reference { ty, .. } => todo!("need pointer type stuff"),
// TODO: Tuples should align same as structs
Type::Tuple(types) => todo!("tuple alignment"),
_ => self.data.get(t).expect("alignment_of").alignment,
}
}
/// Returns the size of the type in bytes
pub fn size_of(&self, t: &Type) -> usize {
self.data.get(t).unwrap().size
}
}
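// A minimal usage sketch of the table above (a `TypeTable::new()` constructor is assumed from
// the `Self { data }` initialiser; nothing outside this file is guaranteed):
//
//     let table = TypeTable::new();
//     assert_eq!(table.size_of(&Type::u32), 4);       // 4 bytes
//     assert_eq!(table.alignment_of(&Type::u64), 8);  // 8-byte alignment on 64-bit targets
//     assert!(table.assert_valid(&Type::bool).is_ok());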
///////////////////// SCOPES + VARIABLES /////////////////////
#[derive(Debug)]
pub enum MemoryUsage {
/// The variable is new -> requires allocation
/// e.g.: `let x: u32 = 7;`
StackSlot,
/// The variable is a struct being returned
/// e.g.: `return Type {...};`
StructReturn,
/// Aliases an existing variable -> use its allocation
/// e.g.: `let x: u32 = y;`
Alias(String),
/// The variable is allocated elsewhere before being passed as a param
/// e.g.: `function(12, x);`
FunctionParam,
|
// e.g.: `let x: u32 = y.a;`
// FieldAlias(),
}
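// Sketch of how these variants map onto source code, following the doc comments above
// (the actual promotion is performed by `signal_return_variable` further down):
//
//     fn make() -> Big {
//         let a = Big { /* ... */ };  // `a` starts as MemoryUsage::StackSlot
//         let b = a;                  // `b` becomes MemoryUsage::Alias("a")
//         return b;                   // walking the alias chain reaches `a`,
//     }                               // which is then promoted to MemoryUsage::StructReturn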
pub struct AllocationTable {
// Map of ((function_name, variable name) -> variable's usage)
pub allocations: HashMap<(String, String), MemoryUsage>,
}
impl AllocationTable {
pub fn new() -> Self {
Self {
allocations: HashMap::new(),
}
}
pub fn insert(&mut self, function: String, variable: String, usage: MemoryUsage) -> Result<(), String> {
if let Some(_existing) = self.allocations.insert((function.clone(), variable.clone()), usage) {
return Err(format!("Variable {} is already defined in function {}", variable, function));
}
Ok(())
}
pub fn get_usage(&mut self, function: &str, variable: &str) -> &MemoryUsage {
// NOTE: This should always be valid
self.allocations.get(&(function.to_owned(), variable.to_owned())).expect("get_usage")
}
}
struct VariableData {
/// Type of the variable
pub ty: Type,
/// What allocation this variable needs
pub memory_usage: MemoryUsage,
/// Is the variable mutable
pub mutable: bool,
}
impl VariableData {
fn new(ty: Type, memory_usage: MemoryUsage, mutable: bool) -> Self {
Self { ty, memory_usage, mutable }
}
}
struct Scope {
/// **This scope's** map of (variable name -> data)
variables: HashMap<String, VariableData>,
}
impl Scope {
fn new() -> Self {
Self {
variables: HashMap::new(),
}
}
fn get_var_data(&self, var: &str) -> &VariableData {
// NOTE: This operation should always succeed
self.variables.get(var).expect("get_var_data")
}
fn get_var_data_mut(&mut self, var: &str) -> &mut VariableData {
// NOTE: This operation should always succeed
self.variables.get_mut(var).expect("get_var_data_mut")
}
fn insert_var_data(&mut self, name: String, var: VariableData) {
// NOTE: This operation should never overwrite existing
self.variables.insert(name, var);
}
}
/// Uses alias analysis to determine stack slot allocations and struct return slot usage
struct Scopes {
/// Each element represents a subsequently nested scope
scopes: Vec<Scope>,
/// Map of (variable name -> its scope)
all_variables: HashMap<String, usize>,
num_scopes: usize,
}
impl Scopes {
fn new() -> Self {
Self {
scopes: Vec::new(),
all_variables: HashMap::new(),
num_scopes: 0,
}
}
fn push_scope(&mut self) {
self.scopes.push(Scope::new());
self.num_scopes += 1;
}
fn pop_scope(&mut self) -> Scope {
// NOTE: These operations should always succeed
let removed_scope = self.scopes.pop().expect("pop_scope");
for key in removed_scope.variables.keys() {
self.all_variables.remove(key);
}
self.num_scopes -= 1;
removed_scope
}
fn current_index(&self) -> usize {
self.num_scopes - 1
}
fn current_scope(&mut self) -> &mut Scope {
let i = self.current_index();
&mut self.scopes[i]
}
// TODO: Field aliasing
// TODO: Handle shadowing
fn add_var_to_scope(&mut self, name: String, mutable: bool, ty: Type, memory_usage: MemoryUsage) -> Result<(), String> {
// if name exists already
if let Some(scope_index) = self.all_variables.insert(name.clone(), self.current_index()) {
// Name exists in the current scope
if scope_index == self.current_index() {
return Err(format!("Variable `{}` is already defined in this scope", name));
} else {
// TODO: This
todo!("Nested scope shadowing")
}
}
self.current_scope().insert_var_data(name, VariableData::new(ty, memory_usage, mutable));
Ok(())
}
// TODO: Handle shadowing
fn get_variable(&self, name: &str) -> Result<&VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data(name));
}
Err(format!("No variable `{}` in scope", name))
}
fn get_variable_mut(&mut self, name: &str) -> Result<&mut VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data_mut(name));
}
Err(format!("No variable `{}` in scope", name))
}
// NOTE: Program is valid at this point. No safety checks needed
/// Uses aliases to convert the return variable's generic allocation to struct-return allocation
/// Target variable is always in the current scope.
fn signal_return_variable(&mut self, mut target: String) {
let mut current;
// Traverse the alias graph to find the true variable being returned.
loop {
current = self.current_scope().get_var_data_mut(&target);
match &current.memory_usage {
// keep looking for root
MemoryUsage::Alias(next)
|
// TODO: References an existing variable -> ??
// e.g.: `let x: &u32 = &y;`
// Borrow(&'input str),
// TODO: Aliases a field of an existing variable -> ??
|
random_line_split
|
mod.rs
|
is more consistent
// FIXME: Only 64-bit architectures are supported by the below values
data.insert(Type::u8, TypeTableEntry::new(1, 1));
data.insert(Type::u16, TypeTableEntry::new(2, 2));
data.insert(Type::u32, TypeTableEntry::new(4, 4));
data.insert(Type::u64, TypeTableEntry::new(8, 8));
data.insert(Type::u128, TypeTableEntry::new(16, 8));
data.insert(Type::i8, TypeTableEntry::new(1, 1));
data.insert(Type::i16, TypeTableEntry::new(2, 2));
data.insert(Type::i32, TypeTableEntry::new(4, 4));
data.insert(Type::i64, TypeTableEntry::new(8, 8));
data.insert(Type::i128, TypeTableEntry::new(16, 8));
data.insert(Type::f32, TypeTableEntry::new(4, 4));
data.insert(Type::f64, TypeTableEntry::new(8, 8));
data.insert(Type::bool, TypeTableEntry::new(1, 1));
data.insert(Type::Unit, TypeTableEntry::new(0, 1));
Self { data }
}
fn insert(&mut self, t: &Type, entry: TypeTableEntry) -> Result<(), String> {
match self.data.insert(t.clone(), entry) {
Some(_) => Err(format!("Type {} already exists", t)),
None => Ok(()),
}
}
fn assert_valid(&self, t: &Type) -> Result<(), String> {
match t {
// Strip away references to check the underlying type
Type::Reference { ty, .. } => Ok(self.assert_valid(ty)?),
// Check all contained types
Type::Tuple(types) => {
// TODO: All types can be checked (rather than stopping at first error)
// Just store all errors, then build an error string
for ty in types {
let result = self.assert_valid(ty);
if result.is_err() {
return result;
}
}
Ok(())
}
// Base types
_ => {
if self.data.contains_key(t) {
Ok(())
} else {
Err(format!("Type `{}` is not valid", t))
}
}
}
}
/// Returns alignment of the type in bytes
fn alignment_of(&self, t: &Type) -> usize {
match t {
// TODO: Alignment should be same as pointer type
Type::Reference { ty, .. } => todo!("need pointer type stuff"),
// TODO: Tuples should align same as structs
Type::Tuple(types) => todo!("tuple alignment"),
_ => self.data.get(t).expect("alignment_of").alignment,
}
}
/// Returns the size of the type in bytes
pub fn size_of(&self, t: &Type) -> usize {
self.data.get(t).unwrap().size
}
}
///////////////////// SCOPES + VARIABLES /////////////////////
#[derive(Debug)]
pub enum MemoryUsage {
/// The variable is new -> requires allocation
/// e.g.: `let x: u32 = 7;`
StackSlot,
/// The variable is a struct being returned
/// e.g.: `return Type {...};`
StructReturn,
/// Aliases an existing variable -> use its allocation
/// e.g.: `let x: u32 = y;`
Alias(String),
/// The variable is allocated elsewhere before being passed as a param
/// e.g.: `function(12, x);`
FunctionParam,
// TODO: References an existing variable -> ??
// e.g.: `let x: &u32 = &y;`
// Borrow(&'input str),
// TODO: Aliases a field of an existing variable -> ??
// e.g.: `let x: u32 = y.a;`
// FieldAlias(),
}
pub struct AllocationTable {
// Map of ((function_name, variable name) -> variable's usage)
pub allocations: HashMap<(String, String), MemoryUsage>,
}
impl AllocationTable {
pub fn new() -> Self {
Self {
allocations: HashMap::new(),
}
}
pub fn insert(&mut self, function: String, variable: String, usage: MemoryUsage) -> Result<(), String> {
if let Some(_existing) = self.allocations.insert((function.clone(), variable.clone()), usage) {
return Err(format!("Variable {} is already defined in function {}", variable, function));
}
Ok(())
}
pub fn get_usage(&mut self, function: &str, variable: &str) -> &MemoryUsage {
// NOTE: This should always be valid
self.allocations.get(&(function.to_owned(), variable.to_owned())).expect("get_usage")
}
}
struct VariableData {
/// Type of the variable
pub ty: Type,
/// What allocation this variable needs
pub memory_usage: MemoryUsage,
/// Is the variable mutable
pub mutable: bool,
}
impl VariableData {
fn new(ty: Type, memory_usage: MemoryUsage, mutable: bool) -> Self {
Self { ty, memory_usage, mutable }
}
}
struct Scope {
/// **This scope's** map of (variable name -> data)
variables: HashMap<String, VariableData>,
}
impl Scope {
fn new() -> Self {
Self {
variables: HashMap::new(),
}
}
fn get_var_data(&self, var: &str) -> &VariableData {
// NOTE: This operation should always succeed
self.variables.get(var).expect("get_var_data")
}
fn get_var_data_mut(&mut self, var: &str) -> &mut VariableData {
// NOTE: This operation should always succeed
self.variables.get_mut(var).expect("get_var_data_mut")
}
fn insert_var_data(&mut self, name: String, var: VariableData) {
// NOTE: This operation should never overwrite existing
self.variables.insert(name, var);
}
}
/// Uses alias analysis to determine stack slot allocations and struct return slot usage
struct Scopes {
/// Each element represents a subsequently nested scope
scopes: Vec<Scope>,
/// Map of (variable name -> its scope)
all_variables: HashMap<String, usize>,
num_scopes: usize,
}
impl Scopes {
fn new() -> Self {
Self {
scopes: Vec::new(),
all_variables: HashMap::new(),
num_scopes: 0,
}
}
fn push_scope(&mut self) {
self.scopes.push(Scope::new());
self.num_scopes += 1;
}
fn pop_scope(&mut self) -> Scope {
// NOTE: These operations should always succeed
let removed_scope = self.scopes.pop().expect("pop_scope");
for key in removed_scope.variables.keys() {
self.all_variables.remove(key);
}
self.num_scopes -= 1;
removed_scope
}
fn current_index(&self) -> usize {
self.num_scopes - 1
}
fn current_scope(&mut self) -> &mut Scope {
let i = self.current_index();
&mut self.scopes[i]
}
// TODO: Field aliasing
// TODO: Handle shadowing
fn add_var_to_scope(&mut self, name: String, mutable: bool, ty: Type, memory_usage: MemoryUsage) -> Result<(), String> {
// if name exists already
if let Some(scope_index) = self.all_variables.insert(name.clone(), self.current_index()) {
// Name exists in the current scope
if scope_index == self.current_index() {
return Err(format!("Variable `{}` is already defined in this scope", name));
} else {
// TODO: This
todo!("Nested scope shadowing")
}
}
self.current_scope().insert_var_data(name, VariableData::new(ty, memory_usage, mutable));
Ok(())
}
// TODO: Handle shadowing
fn get_variable(&self, name: &str) -> Result<&VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data(name));
}
Err(format!("No variable `{}` in scope", name))
}
fn
|
(&mut self, name: &str) -> Result<&mut VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data_mut(name));
}
Err(format!("No variable `{}` in scope", name))
}
// NOTE: Program is valid at this point. No safety checks needed
/// Uses aliases to convert the return variable's generic allocation to struct-return allocation
/// Target variable is always in the current scope.
fn signal_return_variable(&mut self, mut target: String) {
let mut current;
// Traverse the alias graph to find the true variable being returned.
loop {
current = self.current_scope().get_var_data_mut(&target);
match &current.memory_usage {
// keep looking for root
MemoryUsage::Alias
|
get_variable_mut
|
identifier_name
|
mod.rs
|
is more consistent
// FIXME: Only 64-bit architectures are supported by the below values
data.insert(Type::u8, TypeTableEntry::new(1, 1));
data.insert(Type::u16, TypeTableEntry::new(2, 2));
data.insert(Type::u32, TypeTableEntry::new(4, 4));
data.insert(Type::u64, TypeTableEntry::new(8, 8));
data.insert(Type::u128, TypeTableEntry::new(16, 8));
data.insert(Type::i8, TypeTableEntry::new(1, 1));
data.insert(Type::i16, TypeTableEntry::new(2, 2));
data.insert(Type::i32, TypeTableEntry::new(4, 4));
data.insert(Type::i64, TypeTableEntry::new(8, 8));
data.insert(Type::i128, TypeTableEntry::new(16, 8));
data.insert(Type::f32, TypeTableEntry::new(4, 4));
data.insert(Type::f64, TypeTableEntry::new(8, 8));
data.insert(Type::bool, TypeTableEntry::new(1, 1));
data.insert(Type::Unit, TypeTableEntry::new(0, 1));
Self { data }
}
fn insert(&mut self, t: &Type, entry: TypeTableEntry) -> Result<(), String> {
match self.data.insert(t.clone(), entry) {
Some(_) => Err(format!("Type {} already exists", t)),
None => Ok(()),
}
}
fn assert_valid(&self, t: &Type) -> Result<(), String> {
match t {
// Strip away references to check the underlying type
Type::Reference { ty, .. } => Ok(self.assert_valid(ty)?),
// Check all contained types
Type::Tuple(types) => {
// TODO: All types can be checked (rather than stopping at first error)
// Just store all errors, then build an error string
for ty in types {
let result = self.assert_valid(ty);
if result.is_err() {
return result;
}
}
Ok(())
}
// Base types
_ => {
if self.data.contains_key(t)
|
else {
Err(format!("Type `{}` is not valid", t))
}
}
}
}
/// Returns alignment of the type in bytes
fn alignment_of(&self, t: &Type) -> usize {
match t {
// TODO: Alignment should be same as pointer type
Type::Reference { ty, .. } => todo!("need pointer type stuff"),
// TODO: Tuples should align same as structs
Type::Tuple(types) => todo!("tuple alignment"),
_ => self.data.get(t).expect("alignment_of").alignment,
}
}
/// Returns the size of the type in bytes
pub fn size_of(&self, t: &Type) -> usize {
self.data.get(t).unwrap().size
}
}
///////////////////// SCOPES + VARIABLES /////////////////////
#[derive(Debug)]
pub enum MemoryUsage {
/// The variable is new -> requires allocation
/// e.g.: `let x: u32 = 7;`
StackSlot,
/// The variable is a struct being returned
/// e.g.: `return Type {...};`
StructReturn,
/// Aliases an existing variable -> use its allocation
/// e.g.: `let x: u32 = y;`
Alias(String),
/// The variable is allocated elsewhere before being passed as a param
/// e.g.: `function(12, x);`
FunctionParam,
// TODO: References an existing variable -> ??
// e.g.: `let x: &u32 = &y;`
// Borrow(&'input str),
// TODO: Aliases a field of an existing variable -> ??
// e.g.: `let x: u32 = y.a;`
// FieldAlias(),
}
pub struct AllocationTable {
// Map of ((function_name, variable name) -> variable's usage)
pub allocations: HashMap<(String, String), MemoryUsage>,
}
impl AllocationTable {
pub fn new() -> Self {
Self {
allocations: HashMap::new(),
}
}
pub fn insert(&mut self, function: String, variable: String, usage: MemoryUsage) -> Result<(), String> {
if let Some(_existing) = self.allocations.insert((function.clone(), variable.clone()), usage) {
return Err(format!("Variable {} is already defined in function {}", variable, function));
}
Ok(())
}
pub fn get_usage(&mut self, function: &str, variable: &str) -> &MemoryUsage {
// NOTE: This should always be valid
self.allocations.get(&(function.to_owned(), variable.to_owned())).expect("get_usage")
}
}
struct VariableData {
/// Type of the variable
pub ty: Type,
/// What allocation this variable needs
pub memory_usage: MemoryUsage,
/// Is the variable mutable
pub mutable: bool,
}
impl VariableData {
fn new(ty: Type, memory_usage: MemoryUsage, mutable: bool) -> Self {
Self { ty, memory_usage, mutable }
}
}
struct Scope {
/// **This scope's** map of (variable name -> data)
variables: HashMap<String, VariableData>,
}
impl Scope {
fn new() -> Self {
Self {
variables: HashMap::new(),
}
}
fn get_var_data(&self, var: &str) -> &VariableData {
// NOTE: This operation should always succeed
self.variables.get(var).expect("get_var_data")
}
fn get_var_data_mut(&mut self, var: &str) -> &mut VariableData {
// NOTE: This operation should always succeed
self.variables.get_mut(var).expect("get_var_data_mut")
}
fn insert_var_data(&mut self, name: String, var: VariableData) {
// NOTE: This operation should never overwrite existing
self.variables.insert(name, var);
}
}
/// Uses alias analysis to determine stack slot allocations and struct return slot usage
struct Scopes {
/// Each element represents a subsequently nested scope
scopes: Vec<Scope>,
/// Map of (variable name -> its scope)
all_variables: HashMap<String, usize>,
num_scopes: usize,
}
impl Scopes {
fn new() -> Self {
Self {
scopes: Vec::new(),
all_variables: HashMap::new(),
num_scopes: 0,
}
}
fn push_scope(&mut self) {
self.scopes.push(Scope::new());
self.num_scopes += 1;
}
fn pop_scope(&mut self) -> Scope {
// NOTE: These operations should always succeed
let removed_scope = self.scopes.pop().expect("pop_scope");
for key in removed_scope.variables.keys() {
self.all_variables.remove(key);
}
self.num_scopes -= 1;
removed_scope
}
fn current_index(&self) -> usize {
self.num_scopes - 1
}
fn current_scope(&mut self) -> &mut Scope {
let i = self.current_index();
&mut self.scopes[i]
}
// TODO: Field aliasing
// TODO: Handle shadowing
fn add_var_to_scope(&mut self, name: String, mutable: bool, ty: Type, memory_usage: MemoryUsage) -> Result<(), String> {
// if name exists already
if let Some(scope_index) = self.all_variables.insert(name.clone(), self.current_index()) {
// Name exists in the current scope
if scope_index == self.current_index() {
return Err(format!("Variable `{}` is already defined in this scope", name));
} else {
// TODO: This
todo!("Nested scope shadowing")
}
}
self.current_scope().insert_var_data(name, VariableData::new(ty, memory_usage, mutable));
Ok(())
}
// TODO: Handle shadowing
fn get_variable(&self, name: &str) -> Result<&VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data(name));
}
Err(format!("No variable `{}` in scope", name))
}
fn get_variable_mut(&mut self, name: &str) -> Result<&mut VariableData, String> {
if let Some(&index) = self.all_variables.get(name) {
return Ok(self.scopes[index].get_var_data_mut(name));
}
Err(format!("No variable `{}` in scope", name))
}
// NOTE: Program is valid at this point. No safety checks needed
/// Uses aliases to convert the return variable's generic allocation to struct-return allocation
/// Target variable is always in the current scope.
fn signal_return_variable(&mut self, mut target: String) {
let mut current;
// Traverse the alias graph to find the true variable being returned.
loop {
current = self.current_scope().get_var_data_mut(&target);
match &current.memory_usage {
// keep looking for root
MemoryUsage::Alias
|
{
Ok(())
}
|
conditional_block
|
rtppacket.go
|
either an SR or RR
// identifier
if this.header.marker != 0 {
if this.header.payloadtype == (RTP_RTCPTYPE_SR & 127) { // don't check high bit (this was the marker!!)
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
if this.header.payloadtype == (RTP_RTCPTYPE_RR & 127) {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
}
var numpadbytes, payloadoffset, payloadlength int
payloadoffset = SIZEOF_RTPHEADER + 4*int(this.header.csrccount)
if this.header.extension != 0 { // got header extension
this.extension = NewRTPExtension()
if err := this.extension.Parse(this.packet[payloadoffset:]); err != nil {
return err
}
payloadoffset += SIZEOF_RTPEXTENSION + 4*int(this.extension.length)
} else {
this.extension = nil
}
if this.header.padding != 0 { // adjust payload length to take padding into account
numpadbytes = int(this.packet[len(this.packet)-1]) // last byte contains number of padding bytes
if numpadbytes > len(this.packet)-payloadoffset {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
} else {
numpadbytes = 0
}
payloadlength = len(this.packet) - numpadbytes - payloadoffset
if payloadlength < 0 {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
return nil
}
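// Note on the padding handling above: per RFC 3550 §5.1, when the padding bit is set the last
// octet of the packet holds the number of padding octets (that octet included), so the usable
// payload is computed as
//
//	payloadlength = len(packet) - numpadbytes - payloadoffset
//
// where payloadoffset covers the fixed header, the CSRC list and, if present, the header extension.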
/** Creates a new buffer for an RTP packet and fills in the fields according to the specified parameters.
* The arguments are self-explanatory. Note that the size of a header extension is specified in a
* number of 32-bit words. Returns nil if the packet could not be built. */
func NewPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) *RTPPacket {
this := &RTPPacket{}
this.receivetime = &RTPTime{0, 0}
if err := this.BuildPacket(payloadtype,
payloaddata,
seqnr,
timestamp,
ssrc,
gotmarker,
numcsrcs,
csrcs,
gotextension,
extensionid,
extensionlen,
extensiondata); err != nil {
return nil
}
return this
}
func (this *RTPPacket) BuildPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) error {
if numcsrcs > RTP_MAXCSRCS {
return errors.New("ERR_RTP_PACKET_TOOMANYCSRCS")
}
if payloadtype > 127 { // high bit should not be used
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
if payloadtype == 72 || payloadtype == 73 { // could cause confusion with rtcp types
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
var packetlength, packetoffset int
packetlength = SIZEOF_RTPHEADER
packetlength += int(numcsrcs) * 4 //sizeof(uint32_t)*((size_t)
if gotextension {
packetlength += SIZEOF_RTPEXTENSION //(RTPExtensionHeader);
packetlength += int(extensionlen) * 4 //sizeof(uint32_t)*((size_t)
}
packetlength += len(payloaddata) //payloadlen;
this.packet = make([]byte, packetlength)
// Ok, now we'll just fill in...
this.header = NewRTPHeader()
this.header.version = RTP_VERSION
this.header.padding = 0
if gotextension {
this.header.extension = 1
} else {
this.header.extension = 0
}
this.header.csrccount = numcsrcs
if gotmarker {
this.header.marker = 1
} else {
this.header.marker = 0
}
this.header.payloadtype = payloadtype & 127
this.header.sequencenumber = seqnr
this.header.timestamp = timestamp
this.header.ssrc = ssrc
if numcsrcs != 0 {
this.header.csrc = make([]uint32, numcsrcs)
for i := uint8(0); i < numcsrcs; i++ {
this.header.csrc[i] = csrcs[i] //htonl(csrcs[i]);
}
}
packetoffset = SIZEOF_RTPHEADER + int(numcsrcs)*4
copy(this.packet[0:packetoffset], this.header.Encode())
if gotextension {
this.extension = NewRTPExtension()
this.extension.id = extensionid
this.extension.length = extensionlen //sizeof(uint32_t);
if extensionlen != 0 {
this.extension.data = make([]uint32, extensionlen)
for i := uint16(0); i < extensionlen; i++ {
this.extension.data[i] = extensiondata[i]
}
}
copy(this.packet[packetoffset:packetoffset+SIZEOF_RTPEXTENSION+int(extensionlen)*4], this.extension.Encode())
packetoffset += SIZEOF_RTPEXTENSION + int(extensionlen)*4
} else {
this.extension = nil
}
this.payload = make([]byte, len(payloaddata))
copy(this.payload, payloaddata)
copy(this.packet[packetoffset:packetoffset+len(payloaddata)], payloaddata)
return nil
}
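// Worked example of the length arithmetic in BuildPacket above, assuming the usual RFC 3550
// sizes (a 12-byte fixed header and a 4-byte extension header; the actual values live in the
// SIZEOF_RTPHEADER and SIZEOF_RTPEXTENSION constants elsewhere in this package):
//
//	2 CSRCs, a 3-word extension and a 160-byte payload give
//	12 + 2*4 + 4 + 3*4 + 160 = 196 bytes for the packet buffer.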
/** Returns \c true if the RTP packet has a header extension and \c false otherwise. */
func (this *RTPPacket) HasExtension() bool {
return this.header.extension != 0
}
/** Returns \c true if the marker bit was set and \c false otherwise. */
func (this *RTPPacket) HasMarker() bool {
return this.header.marker != 0
}
/** Returns the number of CSRCs contained in this packet. */
func (this *RTPPacket) GetCSRCCount() uint8 {
return this.header.csrccount
}
/** Returns a specific CSRC identifier.
* Returns a specific CSRC identifier. The parameter \c num can go from 0 to GetCSRCCount()-1.
*/
func (this *RTPPacket) GetCSRC(num uint8) uint32 {
if num >= this.header.csrccount {
return 0
}
return this.header.csrc[num]
}
/** Returns the payload type of the packet. */
func (this *RTPPacket) GetPayloadType() uint8 {
return this.header.payloadtype
}
/** Returns the extended sequence number of the packet.
* Returns the extended sequence number of the packet. When the packet is just received,
* only the low 16 bits will be set. The high 16 bits can be filled in later.
*/
// func (this *RTPPacket) GetExtendedSequenceNumber() uint32 {
// return this.extseqnr
// }
/** Returns the sequence number of this packet. */
func (this *RTPPacket) GetSequenceNumber() uint16 {
return this.header.sequencenumber //uint16(this.extseqnr & 0x0000FFFF)
}
/** Sets the extended sequence number of this packet to \c seq. */
// func (this *RTPPacket) SetExtendedSequenceNumber(seq uint32) {
// this.extseqnr = seq
// }
/** Returns the timestamp of this packet. */
func (this *RTPPacket) GetTimestamp() uint32 {
return this.header.timestamp
}
/** Returns the SSRC identifier stored in this packet. */
func (this *RTPPacket) GetSSRC() uint32 {
return this.header.ssrc
}
/** Returns a pointer to the actual payload data. */
func (this *RTPPacket) GetPayload() []byte {
return this.payload
}
/** If a header extension is present, this function returns the extension identifier. */
func (this *RTPPacket) GetExtensionID() uint16 {
return this.extension.id
}
/** Returns the length of the header extension data. */
func (this *RTPPacket) GetExtensionLength() uint16 {
return this.extension.length
}
/** Returns the header extension data. */
func (this *RTPPacket) GetExtensionData() []uint32 {
return this.extension.data
}
/** Returns the time at which this packet was received.
* When an RTPPacket instance is created from an RTPRawPacket instance, the raw packet's
* reception time is stored in the RTPPacket instance. This function then retrieves that
* time.
*/
func (this *RTPPacket)
|
GetReceiveTime
|
identifier_name
|
|
rtppacket.go
|
) ParseRawPacket(rawpack *RawPacket) error {
if !rawpack.IsRTP() { // If we didn't receive it on the RTP port, we'll ignore it
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
this.packet = make([]byte, len(rawpack.GetData()))
copy(this.packet, rawpack.GetData())
this.header = NewRTPHeader()
if err := this.header.Parse(this.packet); err != nil {
return err
}
// The version number should be correct
if this.header.version != RTP_VERSION
|
// We'll check if this is possibly a RTCP packet. For this to be possible
// the marker bit and payload type combined should be either an SR or RR
// identifier
if this.header.marker != 0 {
if this.header.payloadtype == (RTP_RTCPTYPE_SR & 127) { // don't check high bit (this was the marker!!)
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
if this.header.payloadtype == (RTP_RTCPTYPE_RR & 127) {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
}
var numpadbytes, payloadoffset, payloadlength int
payloadoffset = SIZEOF_RTPHEADER + 4*int(this.header.csrccount)
if this.header.extension != 0 { // got header extension
this.extension = NewRTPExtension()
if err := this.extension.Parse(this.packet[payloadoffset:]); err != nil {
return err
}
payloadoffset += SIZEOF_RTPEXTENSION + 4*int(this.extension.length)
} else {
this.extension = nil
}
if this.header.padding != 0 { // adjust payload length to take padding into account
numpadbytes = int(this.packet[len(this.packet)-1]) // last byte contains number of padding bytes
if numpadbytes > len(this.packet)-payloadoffset {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
} else {
numpadbytes = 0
}
payloadlength = len(this.packet) - numpadbytes - payloadoffset
if payloadlength < 0 {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
return nil
}
/** Creates a new buffer for an RTP packet and fills in the fields according to the specified parameters.
* The arguments are self-explanatory. Note that the size of a header extension is specified in a
* number of 32-bit words. Returns nil if the packet could not be built. */
func NewPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) *RTPPacket {
this := &RTPPacket{}
this.receivetime = &RTPTime{0, 0}
if err := this.BuildPacket(payloadtype,
payloaddata,
seqnr,
timestamp,
ssrc,
gotmarker,
numcsrcs,
csrcs,
gotextension,
extensionid,
extensionlen,
extensiondata); err != nil {
return nil
}
return this
}
func (this *RTPPacket) BuildPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) error {
if numcsrcs > RTP_MAXCSRCS {
return errors.New("ERR_RTP_PACKET_TOOMANYCSRCS")
}
if payloadtype > 127 { // high bit should not be used
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
if payloadtype == 72 || payloadtype == 73 { // could cause confusion with rtcp types
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
var packetlength, packetoffset int
packetlength = SIZEOF_RTPHEADER
packetlength += int(numcsrcs) * 4 //sizeof(uint32_t)*((size_t)
if gotextension {
packetlength += SIZEOF_RTPEXTENSION //(RTPExtensionHeader);
packetlength += int(extensionlen) * 4 //sizeof(uint32_t)*((size_t)
}
packetlength += len(payloaddata) //payloadlen;
this.packet = make([]byte, packetlength)
// Ok, now we'll just fill in...
this.header = NewRTPHeader()
this.header.version = RTP_VERSION
this.header.padding = 0
if gotextension {
this.header.extension = 1
} else {
this.header.extension = 0
}
this.header.csrccount = numcsrcs
if gotmarker {
this.header.marker = 1
} else {
this.header.marker = 0
}
this.header.payloadtype = payloadtype & 127
this.header.sequencenumber = seqnr
this.header.timestamp = timestamp
this.header.ssrc = ssrc
if numcsrcs != 0 {
this.header.csrc = make([]uint32, numcsrcs)
for i := uint8(0); i < numcsrcs; i++ {
this.header.csrc[i] = csrcs[i] //htonl(csrcs[i]);
}
}
packetoffset = SIZEOF_RTPHEADER + int(numcsrcs)*4
copy(this.packet[0:packetoffset], this.header.Encode())
if gotextension {
this.extension = NewRTPExtension()
this.extension.id = extensionid
this.extension.length = extensionlen //sizeof(uint32_t);
if extensionlen != 0 {
this.extension.data = make([]uint32, extensionlen)
for i := uint16(0); i < extensionlen; i++ {
this.extension.data[i] = extensiondata[i]
}
}
copy(this.packet[packetoffset:packetoffset+SIZEOF_RTPEXTENSION+int(extensionlen)*4], this.extension.Encode())
packetoffset += SIZEOF_RTPEXTENSION + int(extensionlen)*4
} else {
this.extension = nil
}
this.payload = make([]byte, len(payloaddata))
copy(this.payload, payloaddata)
copy(this.packet[packetoffset:packetoffset+len(payloaddata)], payloaddata)
return nil
}
/** Returns \c true if the RTP packet has a header extension and \c false otherwise. */
func (this *RTPPacket) HasExtension() bool {
return this.header.extension != 0
}
/** Returns \c true if the marker bit was set and \c false otherwise. */
func (this *RTPPacket) HasMarker() bool {
return this.header.marker != 0
}
/** Returns the number of CSRCs contained in this packet. */
func (this *RTPPacket) GetCSRCCount() uint8 {
return this.header.csrccount
}
/** Returns a specific CSRC identifier.
* Returns a specific CSRC identifier. The parameter \c num can go from 0 to GetCSRCCount()-1.
*/
func (this *RTPPacket) GetCSRC(num uint8) uint32 {
if num >= this.header.csrccount {
return 0
}
return this.header.csrc[num]
}
/** Returns the payload type of the packet. */
func (this *RTPPacket) GetPayloadType() uint8 {
return this.header.payloadtype
}
/** Returns the extended sequence number of the packet.
* Returns the extended sequence number of the packet. When the packet is just received,
* only the low 16 bits will be set. The high 16 bits can be filled in later.
*/
// func (this *RTPPacket) GetExtendedSequenceNumber() uint32 {
// return this.extseqnr
// }
/** Returns the sequence number of this packet. */
func (this *RTPPacket) GetSequenceNumber() uint16 {
return this.header.sequencenumber //uint16(this.extseqnr & 0x0000FFFF)
}
/** Sets the extended sequence number of this packet to \c seq. */
// func (this *RTPPacket) SetExtendedSequenceNumber(seq uint32) {
// this.extseqnr = seq
// }
/** Returns the timestamp of this packet. */
func (this *RTPPacket) GetTimestamp() uint32 {
return this.header.timestamp
}
/** Returns the SSRC identifier stored in this packet. */
func (this *RTPPacket) GetSSRC() uint32 {
return this.header.ssrc
}
/** Returns a pointer to the actual payload data. */
func (this *RTPPacket) GetPayload() []byte {
return this.payload
}
/** If a header
|
{
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
|
conditional_block
|
rtppacket.go
|
Packet) ParseRawPacket(rawpack *RawPacket) error {
if !rawpack.IsRTP() { // If we didn't receive it on the RTP port, we'll ignore it
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
this.packet = make([]byte, len(rawpack.GetData()))
copy(this.packet, rawpack.GetData())
this.header = NewRTPHeader()
if err := this.header.Parse(this.packet); err != nil {
return err
}
// The version number should be correct
if this.header.version != RTP_VERSION {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
// We'll check if this is possibly a RTCP packet. For this to be possible
// the marker bit and payload type combined should be either an SR or RR
// identifier
if this.header.marker != 0 {
if this.header.payloadtype == (RTP_RTCPTYPE_SR & 127) { // don't check high bit (this was the marker!!)
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
if this.header.payloadtype == (RTP_RTCPTYPE_RR & 127) {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
}
var numpadbytes, payloadoffset, payloadlength int
payloadoffset = SIZEOF_RTPHEADER + 4*int(this.header.csrccount)
if this.header.extension != 0 { // got header extension
this.extension = NewRTPExtension()
if err := this.extension.Parse(this.packet[payloadoffset:]); err != nil {
return err
}
payloadoffset += SIZEOF_RTPEXTENSION + 4*int(this.extension.length)
} else {
this.extension = nil
}
if this.header.padding != 0 { // adjust payload length to take padding into account
numpadbytes = int(this.packet[len(this.packet)-1]) // last byte contains number of padding bytes
|
} else {
numpadbytes = 0
}
payloadlength = len(this.packet) - numpadbytes - payloadoffset
if payloadlength < 0 {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
return nil
}
/** Creates a new buffer for an RTP packet and fills in the fields according to the specified parameters.
* The arguments are self-explanatory. Note that the size of a header extension is specified in a
* number of 32-bit words. Returns nil if the packet could not be built. */
func NewPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) *RTPPacket {
this := &RTPPacket{}
this.receivetime = &RTPTime{0, 0}
if err := this.BuildPacket(payloadtype,
payloaddata,
seqnr,
timestamp,
ssrc,
gotmarker,
numcsrcs,
csrcs,
gotextension,
extensionid,
extensionlen,
extensiondata); err != nil {
return nil
}
return this
}
func (this *RTPPacket) BuildPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) error {
if numcsrcs > RTP_MAXCSRCS {
return errors.New("ERR_RTP_PACKET_TOOMANYCSRCS")
}
if payloadtype > 127 { // high bit should not be used
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
if payloadtype == 72 || payloadtype == 73 { // could cause confusion with rtcp types
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
var packetlength, packetoffset int
packetlength = SIZEOF_RTPHEADER
packetlength += int(numcsrcs) * 4 //sizeof(uint32_t)*((size_t)
if gotextension {
packetlength += SIZEOF_RTPEXTENSION //(RTPExtensionHeader);
packetlength += int(extensionlen) * 4 //sizeof(uint32_t)*((size_t)
}
packetlength += len(payloaddata) //payloadlen;
this.packet = make([]byte, packetlength)
// Ok, now we'll just fill in...
this.header = NewRTPHeader()
this.header.version = RTP_VERSION
this.header.padding = 0
if gotextension {
this.header.extension = 1
} else {
this.header.extension = 0
}
this.header.csrccount = numcsrcs
if gotmarker {
this.header.marker = 1
} else {
this.header.marker = 0
}
this.header.payloadtype = payloadtype & 127
this.header.sequencenumber = seqnr
this.header.timestamp = timestamp
this.header.ssrc = ssrc
if numcsrcs != 0 {
this.header.csrc = make([]uint32, numcsrcs)
for i := uint8(0); i < numcsrcs; i++ {
this.header.csrc[i] = csrcs[i] //htonl(csrcs[i]);
}
}
packetoffset = SIZEOF_RTPHEADER + int(numcsrcs)*4
copy(this.packet[0:packetoffset], this.header.Encode())
if gotextension {
this.extension = NewRTPExtension()
this.extension.id = extensionid
this.extension.length = extensionlen //sizeof(uint32_t);
if extensionlen != 0 {
this.extension.data = make([]uint32, extensionlen)
for i := uint16(0); i < extensionlen; i++ {
this.extension.data[i] = extensiondata[i]
}
}
copy(this.packet[packetoffset:packetoffset+SIZEOF_RTPEXTENSION+int(extensionlen)*4], this.extension.Encode())
packetoffset += SIZEOF_RTPEXTENSION + int(extensionlen)*4
} else {
this.extension = nil
}
this.payload = make([]byte, len(payloaddata))
copy(this.payload, payloaddata)
copy(this.packet[packetoffset:packetoffset+len(payloaddata)], payloaddata)
return nil
}
/** Returns \c true if the RTP packet has a header extension and \c false otherwise. */
func (this *RTPPacket) HasExtension() bool {
return this.header.extension != 0
}
/** Returns \c true if the marker bit was set and \c false otherwise. */
func (this *RTPPacket) HasMarker() bool {
return this.header.marker != 0
}
/** Returns the number of CSRCs contained in this packet. */
func (this *RTPPacket) GetCSRCCount() uint8 {
return this.header.csrccount
}
/** Returns a specific CSRC identifier.
* Returns a specific CSRC identifier. The parameter \c num can go from 0 to GetCSRCCount()-1.
*/
func (this *RTPPacket) GetCSRC(num uint8) uint32 {
if num >= this.header.csrccount {
return 0
}
return this.header.csrc[num]
}
/** Returns the payload type of the packet. */
func (this *RTPPacket) GetPayloadType() uint8 {
return this.header.payloadtype
}
/** Returns the extended sequence number of the packet.
* Returns the extended sequence number of the packet. When the packet is just received,
* only the low 16 bits will be set. The high 16 bits can be filled in later.
*/
// func (this *RTPPacket) GetExtendedSequenceNumber() uint32 {
// return this.extseqnr
// }
/** Returns the sequence number of this packet. */
func (this *RTPPacket) GetSequenceNumber() uint16 {
return this.header.sequencenumber //uint16(this.extseqnr & 0x0000FFFF)
}
/** Sets the extended sequence number of this packet to \c seq. */
// func (this *RTPPacket) SetExtendedSequenceNumber(seq uint32) {
// this.extseqnr = seq
// }
/** Returns the timestamp of this packet. */
func (this *RTPPacket) GetTimestamp() uint32 {
return this.header.timestamp
}
/** Returns the SSRC identifier stored in this packet. */
func (this *RTPPacket) GetSSRC() uint32 {
return this.header.ssrc
}
/** Returns a pointer to the actual payload data. */
func (this *RTPPacket) GetPayload() []byte {
return this.payload
}
/** If a header extension is
|
if numpadbytes > len(this.packet)-payloadoffset {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
|
random_line_split
|
rtppacket.go
|
) ParseRawPacket(rawpack *RawPacket) error {
if !rawpack.IsRTP() { // If we didn't receive it on the RTP port, we'll ignore it
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
this.packet = make([]byte, len(rawpack.GetData()))
copy(this.packet, rawpack.GetData())
this.header = NewRTPHeader()
if err := this.header.Parse(this.packet); err != nil {
return err
}
// The version number should be correct
if this.header.version != RTP_VERSION {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
// We'll check if this is possibly a RTCP packet. For this to be possible
// the marker bit and payload type combined should be either an SR or RR
// identifier
if this.header.marker != 0 {
if this.header.payloadtype == (RTP_RTCPTYPE_SR & 127) { // don't check high bit (this was the marker!!)
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
if this.header.payloadtype == (RTP_RTCPTYPE_RR & 127) {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
}
var numpadbytes, payloadoffset, payloadlength int
payloadoffset = SIZEOF_RTPHEADER + 4*int(this.header.csrccount)
if this.header.extension != 0 { // got header extension
this.extension = NewRTPExtension()
if err := this.extension.Parse(this.packet[payloadoffset:]); err != nil {
return err
}
payloadoffset += SIZEOF_RTPEXTENSION + 4*int(this.extension.length)
} else {
this.extension = nil
}
if this.header.padding != 0 { // adjust payload length to take padding into account
numpadbytes = int(this.packet[len(this.packet)-1]) // last byte contains number of padding bytes
if numpadbytes > len(this.packet)-payloadoffset {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
} else {
numpadbytes = 0
}
payloadlength = len(this.packet) - numpadbytes - payloadoffset
if payloadlength < 0 {
return errors.New("ERR_RTP_PACKET_INVALIDPACKET")
}
return nil
}
/** Creates a new buffer for an RTP packet and fills in the fields according to the specified parameters.
* The arguments are self-explanatory. Note that the size of a header extension is specified in a
* number of 32-bit words. Returns nil if the packet could not be built. */
func NewPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) *RTPPacket {
this := &RTPPacket{}
this.receivetime = &RTPTime{0, 0}
if err := this.BuildPacket(payloadtype,
payloaddata,
seqnr,
timestamp,
ssrc,
gotmarker,
numcsrcs,
csrcs,
gotextension,
extensionid,
extensionlen,
extensiondata); err != nil {
return nil
}
return this
}
func (this *RTPPacket) BuildPacket(payloadtype uint8,
payloaddata []byte,
seqnr uint16,
timestamp uint32,
ssrc uint32,
gotmarker bool,
numcsrcs uint8,
csrcs []uint32,
gotextension bool,
extensionid uint16,
extensionlen uint16,
extensiondata []uint32) error {
if numcsrcs > RTP_MAXCSRCS {
return errors.New("ERR_RTP_PACKET_TOOMANYCSRCS")
}
if payloadtype > 127 { // high bit should not be used
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
if payloadtype == 72 || payloadtype == 73 { // could cause confusion with rtcp types
return errors.New("ERR_RTP_PACKET_BADPAYLOADTYPE")
}
var packetlength, packetoffset int
packetlength = SIZEOF_RTPHEADER
packetlength += int(numcsrcs) * 4 //sizeof(uint32_t)*((size_t)
if gotextension {
packetlength += SIZEOF_RTPEXTENSION //(RTPExtensionHeader);
packetlength += int(extensionlen) * 4 //sizeof(uint32_t)*((size_t)
}
packetlength += len(payloaddata) //payloadlen;
this.packet = make([]byte, packetlength)
// Ok, now we'll just fill in...
this.header = NewRTPHeader()
this.header.version = RTP_VERSION
this.header.padding = 0
if gotextension {
this.header.extension = 1
} else {
this.header.extension = 0
}
this.header.csrccount = numcsrcs
if gotmarker {
this.header.marker = 1
} else {
this.header.marker = 0
}
this.header.payloadtype = payloadtype & 127
this.header.sequencenumber = seqnr
this.header.timestamp = timestamp
this.header.ssrc = ssrc
if numcsrcs != 0 {
this.header.csrc = make([]uint32, numcsrcs)
for i := uint8(0); i < numcsrcs; i++ {
this.header.csrc[i] = csrcs[i] //htonl(csrcs[i]);
}
}
packetoffset = SIZEOF_RTPHEADER + int(numcsrcs)*4
copy(this.packet[0:packetoffset], this.header.Encode())
if gotextension {
this.extension = NewRTPExtension()
this.extension.id = extensionid
this.extension.length = extensionlen //sizeof(uint32_t);
if extensionlen != 0 {
this.extension.data = make([]uint32, extensionlen)
for i := uint16(0); i < extensionlen; i++ {
this.extension.data[i] = extensiondata[i]
}
}
copy(this.packet[packetoffset:packetoffset+SIZEOF_RTPEXTENSION+int(extensionlen)*4], this.extension.Encode())
packetoffset += SIZEOF_RTPEXTENSION + int(extensionlen)*4
} else {
this.extension = nil
}
this.payload = make([]byte, len(payloaddata))
copy(this.payload, payloaddata)
copy(this.packet[packetoffset:packetoffset+len(payloaddata)], payloaddata)
return nil
}
/** Returns \c true if the RTP packet has a header extension and \c false otherwise. */
func (this *RTPPacket) HasExtension() bool {
return this.header.extension != 0
}
/** Returns \c true if the marker bit was set and \c false otherwise. */
func (this *RTPPacket) HasMarker() bool {
return this.header.marker != 0
}
/** Returns the number of CSRCs contained in this packet. */
func (this *RTPPacket) GetCSRCCount() uint8 {
return this.header.csrccount
}
/** Returns a specific CSRC identifier.
* Returns a specific CSRC identifier. The parameter \c num can go from 0 to GetCSRCCount()-1.
*/
func (this *RTPPacket) GetCSRC(num uint8) uint32 {
if num >= this.header.csrccount {
return 0
}
return this.header.csrc[num]
}
/** Returns the payload type of the packet. */
func (this *RTPPacket) GetPayloadType() uint8 {
return this.header.payloadtype
}
/** Returns the extended sequence number of the packet.
* Returns the extended sequence number of the packet. When the packet is just received,
* only the low 16 bits will be set. The high 16 bits can be filled in later.
*/
// func (this *RTPPacket) GetExtendedSequenceNumber() uint32 {
// return this.extseqnr
// }
/** Returns the sequence number of this packet. */
func (this *RTPPacket) GetSequenceNumber() uint16
|
/** Sets the extended sequence number of this packet to \c seq. */
// func (this *RTPPacket) SetExtendedSequenceNumber(seq uint32) {
// this.extseqnr = seq
// }
/** Returns the timestamp of this packet. */
func (this *RTPPacket) GetTimestamp() uint32 {
return this.header.timestamp
}
/** Returns the SSRC identifier stored in this packet. */
func (this *RTPPacket) GetSSRC() uint32 {
return this.header.ssrc
}
/** Returns a pointer to the actual payload data. */
func (this *RTPPacket) GetPayload() []byte {
return this.payload
}
/** If a header
|
{
return this.header.sequencenumber //uint16(this.extseqnr & 0x0000FFFF)
}
|
identifier_body
|
pxer.js
|
w Pxer();
myPxer.initialize();
nutjs.addEve(myPxer.px.bn_run,'click',function(){
if(myPxer.just_get()){
nutjs.ll("将采用获取单图方式");
return;
}else if(myPxer.read()){//batch fetching is possible
nutjs.ll("将采用批量获取方式");
myPxer.px.pxer_showState.style.display="block";
}else{
nutjs.le("Pxer不知道该怎么做");
};
});
if(bn === true){//UI debug mode: expand all panels
with(myPxer.px){
pxer_main.style.display="block";
pxer_about.style.display="block";
pxer_config.style.display="block";
pxer_showState.style.display="block";
pxer_process.style.display="block";
pxer_filter.style.display="block";
pxer_print.style.display="block";
//add the thread display elements
var new_elt;
for(var i=0;i<6;i++){
new_elt=pxer_thread.cloneNode(true);
new_elt.id="pxer_thread"+(i+1);
new_elt.getElementsByTagName("legend")[0].innerHTML="线程"+(i+1);
if(i%2){
new_elt.getElementsByTagName("em")[0].className="pxer_ok";
new_elt.getElementsByTagName("em")[0].innerHTML="运行中";
}else{
new_elt.getElementsByTagName("em")[0].className="pxer_no";
new_elt.getElementsByTagName("em")[0].innerHTML="已停止";
new_elt.getElementsByTagName("em")[1].innerHTML="999";
}
pxer_process.appendChild(new_elt);
};
}
};
}
/*--- Pxer ---*/
function Pxer(){
this.px={};//holds every element with an id inside the pxer panel, keyed by id
this.is_support=false;//whether the current page is supported
this.address=[];//download addresses to be output
this.addressObj=[];//download address objects
this.thread;//number of threads
this.wait;//maximum wait time
this.okThread=0;//number of threads that have finished
this.maxThread=1;//maximum number of threads: the smaller of the page count and the user setting
this.threadObj=[];//thread objects
this.queue=[];//the work queue
this.queue_num=1;//total number of queue items
this.queue_finish_num=0;//number of queue items already finished
this.once_completion_time=1;//time spent per queue item, used to estimate the remaining time
this.running_time=0;//elapsed running time
this.running_timer=0;//handle of the running-time interval timer
this.remaining_time=1;//estimated remaining time
this.upTimer;//handle of the timer that periodically refreshes the status window
};
Pxer.prototype. xxxx =function(){
};
Pxer.prototype. initialize =function(){
var that=this;
var all_elt=document.getElementById('pxer').getElementsByTagName('*');
for(var i=0;i<all_elt.length;i++){
if(all_elt[i].id){
this.px[all_elt[i].id]=all_elt[i];
};
};
this.px.pxer_main.style.display="block";
nutjs.display(this.px.bn_expert ,this.px.pxer_config);
nutjs.display(this.px.bn_about ,this.px.pxer_about);
nutjs.display(this.px.bn_process ,this.px.pxer_process,null,null,'隐藏线程');
//nutjs.display(this.px.bn_filter ,this.px.pxer_filter,null,null,'关闭过滤');
nutjs.addEve(this.px.bn_log ,'click',function(){
window.open().document.write(nutjs.l);
});
nutjs.addEve(this.px.bn_save ,'click',function(){
that.px.bn_run.click();
});
nutjs.addEve(this.px.bn_filter ,'click',function(){
if(getComputedStyle(that.px.pxer_filter,null).display == 'none'){
that.px.switch_filter.className='pxer_no';
that.px.switch_filter.innerHTML='禁用';
}else{
that.px.switch_filter.className='pxer_ok';
that.px.switch_filter.innerHTML='启用';
}
});
/*start executing: read and process the queue*/
nutjs.addEve(this.px.bn_getall ,'click',function(){
if(this.innerHTML == "开始执行"){
that.getAll();
this.innerHTML="停止执行"
}else{
this.disabled=true;
var queueNum=that.queue.length;
while(that.queue.pop());
that.queue.length=queueNum;
that.okThread = that.maxThread;
that.theadok(1);
this.innerHTML="已停止";
};
});
};
Pxer.prototype. read =function(){
var page_type={
'search' : {
"pxer_page_type" : "标签页",
"works" : "class=count-badge"
},
'member_illust' : {
"pxer_page_type" : "作品页",
"works" : "class=count-badge"
},
'bookmark' : {
"pxer_page_type" : "收藏页",
"works" : "class=count-badge"
}
};
var temp_reg='';
var that=this;
for(var key in page_type){
temp_reg=new RegExp(key);
if(temp_reg.test(document.URL)){//identify the page type
var temp_arr=page_type[key].works.split('=');
var temp_elt=nutjs.getE(temp_arr[0],temp_arr[1]);
if(!temp_elt) return false;
var works=parseInt(temp_elt.innerHTML);
this.is_support=true;
this.px.pxer_page_type.innerHTML =page_type[key].pxer_page_type;//current location
nutjs.addKey(this.px.pxer_page_type,'class','act ','+=');
this.px.pxer_works.innerHTML =works;//number of works
/*estimate queue size and time*/
var page_num=Math.ceil(works/20);
this.queue_num=page_num +works;
/*maximum wait time*/
this.wait=this.px.config_wait.value;
/*build the queue*/
this.queue=[];
for(var i=0;i<page_num;i++){
this.queue.push(document.URL+"&p="+(i+1));
};
/*initialise the thread count; never allow more threads than pages*/
this.thread=this.px.config_thread.value;
this.maxThread = +(this.queue.length>this.thread?this.thread:this.queue.length);
//update the display
this.px.show_wait.innerHTML=this.wait;
this.px.show_thread.innerHTML=this.maxThread;
/*show the result*/
this.queue_show_update();
return true;
}
};
return false;
};
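/* Worked example of the bookkeeping in read() above (numbers are illustrative only):
with works = 55 and 20 works per listing page,
    page_num  = Math.ceil(55/20) = 3   // listing pages to crawl
    queue_num = 3 + 55 = 58            // page fetches plus per-work fetches
    maxThread = Math.min(3, config_thread.value)  // never more threads than pages
*/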
Pxer.prototype. just_get =function(_url){//dedicated to fetching a single work
if(/member_illust/.test(document.URL) && /mode=medium/.test(document.URL)){
var url =_url ||document.URL;
pxget=new PxGet(url);
pxget.fn=function(adr){
nutjs.print_r(this.pr);
};
pxget.workHtml=document.body.innerHTML;
pxget.get();
return true;
}else{
return false;
};
};
Pxer.prototype. getAll =function(){//fetch all works
var that=this;
//start timing
this.running_timer=setInterval(function(){
that.running_time++;
},1000);
//initialise the thread objects
for(var i=0;i<this.maxThread;i++){
this.threadObj.push(new Thread(this));
this.threadObj[i].id=i+1;
this.threadObj[i].run();
};
//show the thread window
this.px.bn_process.click();
//initialise and periodically refresh the status window
this.px.pxer_state.className="";
this.px.pxer_state.innerHTML="执行中";
var new_elt;
for(var i=0;i<this.maxThread;i++){
new_elt=this.px.pxer_thread.cloneNode(true);
new_elt.id="pxer_thread"+(i+1);
new_elt.getElementsByTagName("legend")[0].innerHTML="线程"+(i+1);
new_elt.getElementsByTagName("em")[0].className="pxer_ok";
new_elt.getElementsByTagName("em")[0].innerHTML="运行中";
this.px.pxer_process.appendChild(new_elt);
};
this.upTimer=setInterval(function(){
that.queue_show_update.call(that);
},500);
};
Pxer.prototype. theadok =function(threadId){//callback invoked when a thread's queue has finished
var that=this;
var threadState=document.getElementById("pxer_thread"+threadId).getElementsByTagName("em")[0];
threadState.innerHTML="停止";
threadState.className="pxer_no"
if(++this.okThread >= this.maxThread){//全部队列执行完毕
//清除定时时间计算
clearInterval(this.running_timer);
clearInterval(this.upTimer);
this.queue_show_update();
//更新显示状态
this.px.pxer_state.className="pxer_ok";
this.px.pxer_state.innerHTML="执行完毕";
this.px.pxer_print.style.display="block";
nutjs.addEve(this.px.bn_print,'click',function(){
that.print.call(that);
});
//整合下载地址对象
var temp_arr=[];
for(var i=0;i<this.threadObj.length;i++){
temp_arr=temp_arr.concat(this.threadObj[i
|
myPxer=ne
|
identifier_name
|
|
pxer.js
|
"http://#server#.pixiv.net/img-original/img/#date#/#workid#_p#picnum#.#fx#",
"http://#server#.pixiv.net/c/1200x1200/img-master/img/#date#/#workid#_p#picnum#_master1200.jpg",
"http://#server#.pixiv.net/c/600x600/img-master/img/#date#/#workid#_p0_master1200.jpg",
""
],
'sids':["http://#server#.pixiv.net/c/600x600/img-master/img/#date#/#workid#_p0_master1200.jpg"],
'zip':[
'http://#server#.pixiv.net/img-zip-ugoira/img/#date#/#workid#_ugoira1920x1080.zip',
'http://#server#.pixiv.net/img-zip-ugoira/img/#date#/#workid#_ugoira600x600.zip',
''
]
};
var tmp_address='';
var tmp_type;
var tmp_size;
for(var i=0;i<this.addressObj.length;i++){
tmp_type=this.addressObj[i].type;
tmp_size=config_obj["config_"+tmp_type+"_o"]
if(tmp_size == undefined) continue;//skip template lookup for types that should not be output
tmp_address=output_template[tmp_type][tmp_size]
.replace("#fx#",this.addressObj[i].fx)
.replace("#workid#",this.addressObj[i].workid)
.replace("#date#",this.addressObj[i].date)
.replace("#server#",this.addressObj[i].server)
;
if(/#picnum#/.test(tmp_address)){
for(var v=0;v<this.addressObj[i].picnum;v++){
this.address.push(tmp_address.replace("#picnum#",v));
};
}else{
this.address.push(tmp_address);
};
};
//output
var win=window.open();
/*
Sample report:
Pxer beta 4.1.1 2015-10-29
Took 33 seconds
============================
Manga, max-size output: 21
Illustrations, 600p output: 34
Ugoira, no output: 0
============================
55 works in total, 307 download addresses
============================
*/
var date = new Date()
var dateStr=date.getFullYear()+"-"+(date.getMonth()+1)+"-"+date.getDate()
win.document.write("Pxer "+this.px.pxer_version.innerHTML+" "+dateStr+"<br />");
win.document.write("============================"+"<br />");
var work_num_obj={"pic":0,"ids":0,"sids":0,"zip":0,"no_permission":0};
for(var i=0;i<this.addressObj.length;i++){
for(var key in work_num_obj){
if(this.addressObj[i].type == key) work_num_obj[key]++
};
};
var checked_index;
var tmp_html;
for(var key in work_num_obj){
if(key == 'sids'){
tmp_html="600p"
}else{
checked_index=config_obj["config_"+key+"_o"]
if(checked_index == undefined) continue;//skip types that should not be output
tmp_html=config_obj["config_"+key][checked_index].value
};
win.document.write(" ->"+key+" --- 【"+tmp_html+"】 --- 【"+work_num_obj[key]+"】<br />");
};
win.document.write(" ->no_permission --- 【"+work_num_obj[key]+"】<br />");
win.document.write("共计 "+this.addressObj.length+" 幅作品,"+this.address.length+" 个下载地址<br />");
win.document.write("============================"+"<br />");
win.document.write("共耗时 "+this.running_time+" 秒,平均每秒 "+(this.addressObj.length/this.running_time).toFixed(2)+" 张<br />");
win.document.write("采用 "+this.maxThread+" 线程,平均每张花费 "+(this.addressObj.length/this.running_time/this.maxThread).toFixed(2)+" 秒"+"<br />");
win.document.write("============================"+"<br />");
for(var i=0;i<this.address.length;i++){
if(this.address[i]){
win.document.write(this.address[i]);
win.document.write('<br />');
};
}
//check whether ugoira (animation) parameters need to be output
if(config_obj["config_zip"][config_obj["config_zip_o"]].value != 'No' && work_num_obj["zip"]){
var zip_config=window.open();
for(var i=0;i<this.addressObj.length;i++){
if(this.addressObj[i].type == 'zip'){
zip_config.document.write('{"id":"'+this.addressObj[i].workid+'","config":'+this.addressObj[i].zipo+'}<br />');
}
};
};
};
Pxer.prototype. queue_show_update=function(){//refresh the displayed statistics
//number of unfinished items
this.px.show_queue_num.innerHTML=this.queue_num;
var finish_address=0;
var finish_list=0;
for(var i=0;i<this.threadObj.length;i++){
finish_address+= (this.threadObj[i].address.length);
finish_list+= (this.threadObj[i].strTask);
}
if(finish_address ==0){
this.queue_finish_num = finish_list;
}else{
this.queue_finish_num = (this.queue_num - this.queue.length);
}
this.px.show_queue_finish_num.innerHTML =this.queue_finish_num;
//elapsed time and estimated remaining time
this.px.show_running_time.innerHTML=Math.floor(this.running_time/60)+":" +this.add0( this.running_time%60 );
this.remaining_time= this.running_time/(this.queue_finish_num/this.queue_num)-this.running_time;
if(isNaN(this.remaining_time))this.remaining_time=(this.queue_num *this.once_completion_time)/this.maxThread;
this.px.show_remaining_time.innerHTML=Math.floor(this.remaining_time/60)+":" +this.add0( Math.floor(this.remaining_time%60));
//thread window
var tpm_elt;
var tpm_arr;
for(var i=0;i<this.threadObj.length;i++){
tpm_elt=document.getElementById("pxer_thread"+(i+1));
tpm_elt.getElementsByTagName("em")[1].innerHTML=this.threadObj[i].address.length;
};
};
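/* Illustrative sketch (not part of Pxer): the remaining-time estimate in queue_show_update
   above extrapolates the current completion rate. */
function estimateRemainingSeconds(elapsed,finished,total){
	return elapsed/(finished/total)-elapsed;
}
//estimateRemainingSeconds(30, 20, 58) is about 57 seconds, i.e. roughly another minute at the current pace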
Pxer.prototype. add0 =function(num){//left-pad single-digit numbers with a zero
if(num<10) return "0"+num;
return num;
};
/*--- Thread --- worker that reads the queue and calls PxGet; it also detects the work type and seeds the work queue ---*/
function Thread(pxer){
this.pxer=pxer;
this.queue=pxer.queue;//reference to the shared queue
this.task;//the task currently being executed
this.strTask=0;//number of finished string-type (listing page) tasks
this.address=[];//download addresses collected by this thread, all objects
this.id;
this.ajax=new nutjs.ajax_class();
this.startTime=0;
this.timer;
};
Thread.prototype. run =function(){
clearInterval(this.timer);
this.startTime=0
this.task=this.queue.shift();
if(this.task){//开始执行
this.getEngine();
}else{
nutjs.ll("线程【"+this.id+"】已完成任务");
this.pxer.theadok(this.id);
};
};
Thread.prototype. getEngine =function(){
var that=this;
if(typeof this.task === "string"){//作品目录
this.ajax.url=this.task;
this.ajax.mode="get";
this.ajax.waitBn=true;
this.ajax.waitTime=this.pxer.wait;
this.ajax.fn=function(re){
that.getWorkqueue(re);
that.strTask++;
that.run();
};
this.ajax.send();
//watch the Ajax call to keep the program robust
}else if(typeof this.task === "object"){/*a work object
url: the work's workUrl
type: the work's type [pic|ids|zip]
*/
var pxget=new PxGet(this.pxer);
pxget.workUrl=this.task.url;
pxget.pr.type=this.task.type;
pxget.fn=function(re){
that.address.push(this.pr);
that.run();
};
pxget.get();
}else{
nutjs.lw("线程【"+this.id+"】丢失任务,时间指针为【"+this.timer+"】任务的类型为【"+typeof this.task+"】,任务为【"+this.task+"】");
this.run();
return;
};
};
Thread.prototype. getWorkqueue =function(html){
var reg=/<a[^<>]*?href="([^"<>]*)"[^<>]*?class="(work\s+_work[^<>"]*)"[^<>]*>/img;
var te
|
mp_arr=html.match(reg);
for(var i=0;i<temp_arr.length;i++){
var obj=new Object();
var arr=reg.exec(temp_arr[i]);
if(! /^\//.test(arr[1]))arr[1]="/"+arr[1];
obj.url="http://www.pixiv.net"+arr[1];
reg.lastIndex=0;//reset because exec is called on a global regex
if(/ugoku\-illust/.test(arr[2])){
obj.type="zip";
}else if(/
|
identifier_body
|
|
pxer.js
|
reg=/<a[^<>]*?href="([^"<>]*)"[^<>]*?class="(work\s+_work[^<>"]*)"[^<>]*>/img;
var temp_arr=html.match(reg);
for(var i=0;i<temp_arr.length;i++){
var obj=new Object();
var arr=reg.exec(temp_arr[i]);
if(! /^\//.test(arr[1]))arr[1]="/"+arr[1];
obj.url="http://www.pixiv.net"+arr[1];
reg.lastIndex=0;//reset because exec is called on a global regex
if(/ugoku\-illust/.test(arr[2])){
obj.type="zip";
}else if(/multiple/.test(arr[2])){
obj.type="ids";
}else if(/manga/.test(arr[2])){
obj.type="sids";
}else if(/^\s*work\s*_work\s*$/.test(arr[2])){
obj.type="pic";
}else{
nutjs.le("函数getWorkqueue无法判断作品类型!class【"+arr[2]+"】,href【"+arr[1]+"】");
continue;
}
this.queue.push(obj);
};
};
/*--- PxGet --- fetches the download addresses of a single page ---*/
function PxGet(pxer){//fetches the image links of a page
this.pxer=pxer;//direct access to the source Pxer object
this.fn;//callback invoked when finished
/*The fields below are the intermediate steps of the fetch process*/
this.pr={};/*the parameters extracted for the work; see the notes at the top of this file
How a work is stored:
{
type : ids/pic/zip the work's type
fx : jpg/gif/png the work's file extension
workid : \d+ the work's ID
date : [Y,m,d,h,m,s] the work's submission time
server : i\d the server the work is assigned to
[picnum]: \d+ ids only, the number of pages
[zipo] : [\w\W]* zip only, the ugoira's dynamic parameters
}
*/
/*this.sidsHtml: html of a single-page manga work's large-image page
the following can be derived from this field:
address
*/
/*this.sidsUrl: url of a single-page manga work's large-image page
the following can be derived from this field:
sidsHtml
*/
this.idsUrl1Html;/*html of a multi-page manga work's index page
the following can be derived from this field:
pr.picnum
*/
this.idsUrl2Html;/*html of one large-image page of a multi-page manga work
the following can be derived from this field:
address
*/
this.idsUrl1;/*url of a multi-page manga work's index page
the following can be derived from this field:
idsUrl1Html
*/
this.idsUrl2;/*url of one large-image page of a multi-page manga work
the following can be derived from this field:
idsUrl2Html
*/
this.address;/*the work's download address
the following can be derived from this field:
pr.fx
pr.workid
pr.date
pr.server
*/
this.workHtml;/*the work's html
the following can be derived from this field:
pr.type
if $pr.type=pic then
address
*/
this.workUrl;/*the work's url
the following can be derived from this field:
workHtml -net
*/
this.workId;/*the work's id @address
the following can be derived from this field:
workUrl
idsUrl1
idsUrl2
sidsUrl
*/
/*runtime parameters*/
this.create_url={};
};
PxGet.prototype. get=function(){
if(this.isOK()) return this.fn();
if(this.pr.type){
switch(this.pr.type){
case 'pic':
if(this.address){
this.get_pr_from_address();
}else if(this.workHtml){
this.get_address_from_workHtml();
}else if(this.workUrl){
this.get_workHtml_from_workUrl();
return;
}else{
this.pr.type=null;
};
break;
case 'ids':
if(this.address){
this.get_pr_from_address();
}else if(this.idsUrl1Html || this.idsUrl2Html){
if(this.idsUrl1Html && this.idsUrl2Html){
this.get_address_from_idsUrl2Html();
this.get_prPicnum_from_idsUrl1Html();
}else{
return;
};
}else if(this.idsUrl1 && this.idsUrl2){
this.get_idsUrl1Html_from_idsUrl1();
this.get_idsUrl2Html_from_idsUrl2();
return;
}else if(this.workId){
this.get_idsUrlx_from_workId();
}else if(this.workHtml){
this.get_workId_from_workHtml();
}else if(this.workUrl){
this.get_workHtml_from_workUrl();
return;
}else{
this.pr.type=null;
};
break;
case 'sids':
if(this.workHtml){
this.get_pr_from_workHtml();
}else if(this.workUrl){
this.get_workHtml_from_workUrl();
return;
}else{
this.pr.type=null;
};
break;
case 'zip':
if(this.address){
this.get_pr_from_address();
}else if(this.workHtml){
this.get_address_zipo_from_workHtml_zip();
}else if(this.workUrl){
this.get_workHtml_from_workUrl();
return;
}else{
this.pr.type=null;
};
break;
default:
nutjs.le('函数PxGet.get,未知的prType值【'+this.pr.type+"】");
};
}else{
if(this.workHtml){
this.get_prType_from_workHtml();
}else if(this.workUrl){
this.get_workHtml_from_workUrl();
return;
}else if(this.workId){
this.get_workUrl_from_workId();
}else{
nutjs.le("函数PxGet.get,参数不足以获取");
};
};
this.get();
};
PxGet.prototype. isOK=function(){
if(
this.pr.type &&
this.pr.server &&
(this.pr.workid||isNaN(this.pr.workid))&&
this.pr.date
){
return true
}else if(this.pr.type == 'no_permission'){
return true
}else{
return false
};
};
PxGet.prototype. stop=function(){
this.fn=function(){};
this.get=function(){};
};
/*generic fetch helpers*/
PxGet.prototype. get_workUrl_from_workId=function(){
this.workUrl="http://www.pixiv.net/member_illust.php?mode=medium&illust_id="+this.workId;
};
PxGet.prototype. get_workHtml_from_workUrl=function(){
var that=this;
var ajax=new nutjs.ajax_class();
ajax.url=this.workUrl;
ajax.mode="get";
ajax.waitBn=true;
if(this.pxer)ajax.waitTime=this.pxer.wait;
ajax.fn=function(re){
//check whether the work is private (only visible to MyPixiv friends)
if(/sprites\-mypixiv\-badge/.test(re)){
that.pr.type="no_permission";
that.pr.workid=NaN
};
that.workHtml=re;
that.get();
};
ajax.send();
};
PxGet.prototype. get_prType_from_workHtml=function(){
var is_ids=/class[^<>]*works_display[^<>]*>[\w\W]*?<a[\w\W]*?href[^"']*?"([^"']*?mode[^"']*?manga[^"']*)"/mi;
var is_sids=/class[^<>]*works_display[^<>]*>[\w\W]*?<a[\w\W]*?href[^"']*?"([^"']*?mode[^"']*?big[^"']*)"/mi;
var is_pic=/<img[^<>]*data-src[^"]"([^\{\}]*?)"[^<>]*class[^<>]*original-image/mi;
var is_zip=/ugoira600x600\.zip/im;
if(is_ids.test(this.workHtml)){
this.pr.type="ids";
}else if(is_pic.test(this.workHtml)){
this.pr.type="pic";
}else if(is_sids.test(this.workHtml)){
this.pr.type="sids";
}else if(is_zip.test(this.workHtml)){
this.pr.type="zip";
}else{
ePxer.push(this);
nutjs.lw("函数get_prType_from_workHtml无法通过workHtml鉴别出prType!回滚操作,并添加当前PxGet对象 $ePxer["+(ePxer.length-1)+"]");
this.pr.workHtml=null;
return this.fn();
}
};
PxGet.prototype. get_pr_from_address=function(){
var reg=/http:\/\/([^\.]*)\.pixiv.net\/[^"<>]*?\/([\d\/]{19})\/(\d+)_\w+?\.(\w+)/;
var arr=reg.exec(this.address);
//if(!arr)alert(this.pr.type+"\n"+this.sidsUrl+"\n"+this.sidsHtml);
this.pr.fx=arr[4];
this.pr.workid=arr[3];
this.pr.date=arr[2];
this.pr.server=arr[1];
};
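/* Illustrative sketch (not part of Pxer): what get_pr_from_address extracts from a
   typical master-image URL. The address below is a hypothetical example. */
var exampleAddress="http://i1.pixiv.net/c/600x600/img-master/img/2015/10/29/12/34/56/12345678_p0_master1200.jpg";
var exampleReg=/http:\/\/([^\.]*)\.pixiv.net\/[^"<>]*?\/([\d\/]{19})\/(\d+)_\w+?\.(\w+)/;
var exampleMatch=exampleReg.exec(exampleAddress);
// exampleMatch[1] === "i1"                   -> pr.server
// exampleMatch[2] === "2015/10/29/12/34/56"  -> pr.date (19 characters: Y/m/d/h/m/s)
// exampleMatch[3] === "12345678"             -> pr.workid
// exampleMatch[4] === "jpg"                  -> pr.fx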
/*pic-type works only*/
PxGet.prototype. get_address_from_workHtml=funct
|
ion(){
var reg=/<img[^<>]*data-src[^"]"(
|
conditional_block
|
|
pxer.js
|
this.thread=this.px.config_thread.value;
this.maxThread = +(this.queue.length>this.thread?this.thread:this.queue.length);
//update the display
this.px.show_wait.innerHTML=this.wait;
this.px.show_thread.innerHTML=this.maxThread;
/*Show the result*/
this.queue_show_update();
return true;
}
};
return false;
};
Pxer.prototype. just_get =function(_url){//dedicated to fetching a single work
if(/member_illust/.test(document.URL) && /mode=medium/.test(document.URL)){
var url =_url ||document.URL;
pxget=new PxGet(url);
pxget.fn=function(adr){
nutjs.print_r(this.pr);
};
pxget.workHtml=document.body.innerHTML;
pxget.get();
return true;
}else{
return false;
};
};
Pxer.prototype. getAll =function(){//fetch every work
var that=this;
//start the timer
this.running_timer=setInterval(function(){
that.running_time++;
},1000);
//initialize the thread objects
for(var i=0;i<this.maxThread;i++){
this.threadObj.push(new Thread(this));
this.threadObj[i].id=i+1;
this.threadObj[i].run();
};
//show the thread window
this.px.bn_process.click();
//initialize and periodically refresh the display window
this.px.pxer_state.className="";
this.px.pxer_state.innerHTML="执行中";
var new_elt;
for(var i=0;i<this.maxThread;i++){
new_elt=this.px.pxer_thread.cloneNode(true);
new_elt.id="pxer_thread"+(i+1);
new_elt.getElementsByTagName("legend")[0].innerHTML="线程"+(i+1);
new_elt.getElementsByTagName("em")[0].className="pxer_ok";
new_elt.getElementsByTagName("em")[0].innerHTML="运行中";
this.px.pxer_process.appendChild(new_elt);
};
this.upTimer=setInterval(function(){
that.queue_show_update.call(that);
},500);
};
Pxer.prototype. theadok =function(threadId){//callback invoked when a thread's queue is finished
var that=this;
var threadState=document.getElementById("pxer_thread"+threadId).getElementsByTagName("em")[0];
threadState.innerHTML="停止";
threadState.className="pxer_no";
if(++this.okThread >= this.maxThread){//the whole queue has been processed
//clear the timing intervals
clearInterval(this.running_timer);
clearInterval(this.upTimer);
this.queue_show_update();
//update the displayed state
this.px.pxer_state.className="pxer_ok";
this.px.pxer_state.innerHTML="执行完毕";
this.px.pxer_print.style.display="block";
nutjs.addEve(this.px.bn_print,'click',function(){
that.print.call(that);
});
//merge the download-address objects
var temp_arr=[];
for(var i=0;i<this.threadObj.length;i++){
temp_arr=temp_arr.concat(this.threadObj[i].address);
}
for(var i=0;i<temp_arr.length;i++){
this.addressObj=this.addressObj.concat(temp_arr[i]);
}
//remove duplicate entries
for(var i=0;i<this.addressObj.length-1;i++){
for(var v=i+1;v<this.addressObj.length;v++){
if(this.addressObj[i].workid == this.addressObj[v].workid){
this.addressObj.splice(v,1);
v--;
};
};
};
};
};
Pxer.prototype. print =function(){
//reset state so the method can be called more than once
this.address=[];//the download addresses that will be output
//read the output options chosen by the user
var config_arr=['config_pic','config_ids','config_zip'];
var config_obj={"config_sids_o":0};
for(var i=0;i<config_arr.length;i++){
config_obj[config_arr[i]]=document.getElementsByName(config_arr[i]);
for(var v=0; v<config_obj[ config_arr[i] ].length ;v++){
if(config_obj[config_arr[i]][v].checked){
config_obj[config_arr[i]+"_o"]=v;
};
};
};
/*Convert the parameter objects into download addresses
type : ids/pic/zip the work's type
fx : jpg/gif/png the work's file extension
workid : \d+ the work's ID
date : [Y,m,d,h,m,s] the work's submission time
server : i\d the server the work is assigned to
[picnum]: \d+ ids only, the number of pages
[zipo] : [\w\W]* zip only, the ugoira's dynamic parameters
*/
var output_template={
'pic':[
"http://#server#.pixiv.net/img-original/img/#date#/#workid#_p0.#fx#",
"http://#server#.pixiv.net/c/600x600/img-master/img/#date#/#workid#_p0_master1200.jpg",
""
],
'ids':[
"http://#server#.pixiv.net/img-original/img/#date#/#workid#_p#picnum#.#fx#",
"http://#server#.pixiv.net/c/1200x1200/img-master/img/#date#/#workid#_p#picnum#_master1200.jpg",
"http://#server#.pixiv.net/c/600x600/img-master/img/#date#/#workid#_p0_master1200.jpg",
""
],
'sids':["http://#server#.pixiv.net/c/600x600/img-master/img/#date#/#workid#_p0_master1200.jpg"],
'zip':[
'http://#server#.pixiv.net/img-zip-ugoira/img/#date#/#workid#_ugoira1920x1080.zip',
'http://#server#.pixiv.net/img-zip-ugoira/img/#date#/#workid#_ugoira600x600.zip',
''
]
};
var tmp_address='';
var tmp_type;
var tmp_size;
for(var i=0;i<this.addressObj.length;i++){
tmp_type=this.addressObj[i].type;
tmp_size=config_obj["config_"+tmp_type+"_o"]
if(tmp_size == undefined) continue;//skip template lookup for types that should not be output
tmp_address=output_template[tmp_type][tmp_size]
.replace("#fx#",this.addressObj[i].fx)
.replace("#workid#",this.addressObj[i].workid)
.replace("#date#",this.addressObj[i].date)
.replace("#server#",this.addressObj[i].server)
;
if(/#picnum#/.test(tmp_address)){
for(var v=0;v<this.addressObj[i].picnum;v++){
this.address.push(tmp_address.replace("#picnum#",v));
};
}else{
this.address.push(tmp_address);
};
};
//output
var win=window.open();
/*
Sample report:
Pxer beta 4.1.1 2015-10-29
Took 33 seconds
============================
Manga, max-size output: 21
Illustrations, 600p output: 34
Ugoira, no output: 0
============================
55 works in total, 307 download addresses
============================
*/
var date = new Date()
var dateStr=date.getFullYear()+"-"+(date.getMonth()+1)+"-"+date.getDate()
win.document.write("Pxer "+this.px.pxer_version.innerHTML+" "+dateStr+"<br />");
win.document.write("============================"+"<br />");
var work_num_obj={"pic":0,"ids":0,"sids":0,"zip":0,"no_permission":0};
for(var i=0;i<this.addressObj.length;i++){
for(var key in work_num_obj){
if(this.addressObj[i].type == key) work_num_obj[key]++
};
};
var checked_index;
var tmp_html;
for(var key in work_num_obj){
if(key == 'sids'){
tmp_html="600p"
}else{
checked_index=config_obj["config_"+key+"_o"]
if(checked_index == undefined) continue;//skip types that should not be output
tmp_html=config_obj["config_"+key][checked_index].value
};
win.document.write(" ->"+key+" --- 【"+tmp_html+"】 --- 【"+work_num_obj[key]+"】<br />");
};
win.document.write(" ->no_permission --- 【"+work_num_obj[key]+"】<br />");
win.document.write("共计 "+this.addressObj.length+" 幅作品,"+this.address.length+" 个下载地址<br />");
win.document.write("============================"+"<br />");
win.document.write("共耗时 "+this.running_time+" 秒,平均每秒 "+(this.addressObj.length/this.running_time
|
this.queue.push(document.URL+"&p="+(i+1));
};
/*Initialize the thread count; never allow more threads than pages*/
|
random_line_split
|
|
value.rs
|
<ValueWrapper>,
}
impl Default for Set {
fn default() -> Self {
Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
}
}
}
impl Set {
pub fn empty() -> Value {
Value::new(Set::default())
}
pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
let mut result = Self::default();
for v in values.into_iter() {
result
.content
.insert_if_absent(ValueWrapper::new(v.into())?);
}
Ok(Value::new(result))
}
pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
let v = v.clone_for_container_value(set)?;
Self::mutate(set, &|hashset| {
hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
Ok(Value::from(None))
})
}
pub fn mutate(
v: &Value,
f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
) -> ValueResult {
if v.get_type() != "set" {
Err(ValueError::IncorrectParameterType)
} else {
let mut v = v.clone();
v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
x.mutability.test()?;
f(&mut x.content)
})
}
}
pub fn compare<Return>(
v1: &Value,
v2: &Value,
f: &Fn(
&LinkedHashSet<ValueWrapper>,
&LinkedHashSet<ValueWrapper>,
) -> Result<Return, ValueError>,
) -> Result<Return, ValueError> {
if v1.get_type() != "set" || v2.get_type() != "set" {
Err(ValueError::IncorrectParameterType)
} else {
v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
}
}
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while !self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult {
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
}
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result))
|
}
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
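// Illustrative sketch (not part of value.rs): get_hash above folds the elements'
// precomputed hashes with wrapping addition, so a set's hash does not depend on
// insertion order. A minimal standalone model of that idea (it reuses the
// std::num::Wrapping import already in scope in this file):
fn order_independent_hash(element_hashes: &[u64]) -> u64 {
    element_hashes
        .iter()
        .cloned()
        .map(Wrapping)
        .fold(Wrapping(0_u64), |acc, v| acc + v)
        .0
}

#[test]
fn order_independent_hash_ignores_order() {
    assert_eq!(
        order_independent_hash(&[1, 2, 3]),
        order_independent_hash(&[3, 1, 2]) // same elements, different order
    );
}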
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[cfg
|
} else {
Err(ValueError::IncorrectParameterType)
|
random_line_split
|
value.rs
|
Wrapper>,
}
impl Default for Set {
fn default() -> Self {
Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
}
}
}
impl Set {
pub fn empty() -> Value {
Value::new(Set::default())
}
pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
let mut result = Self::default();
for v in values.into_iter() {
result
.content
.insert_if_absent(ValueWrapper::new(v.into())?);
}
Ok(Value::new(result))
}
pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
let v = v.clone_for_container_value(set)?;
Self::mutate(set, &|hashset| {
hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
Ok(Value::from(None))
})
}
pub fn mutate(
v: &Value,
f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
) -> ValueResult {
if v.get_type() != "set"
|
else {
let mut v = v.clone();
v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
x.mutability.test()?;
f(&mut x.content)
})
}
}
pub fn compare<Return>(
v1: &Value,
v2: &Value,
f: &Fn(
&LinkedHashSet<ValueWrapper>,
&LinkedHashSet<ValueWrapper>,
) -> Result<Return, ValueError>,
) -> Result<Return, ValueError> {
if v1.get_type() != "set" || v2.get_type() != "set" {
Err(ValueError::IncorrectParameterType)
} else {
v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
}
}
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while !self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult {
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
}
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result))
} else {
Err(ValueError::IncorrectParameterType)
}
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[cfg
|
{
Err(ValueError::IncorrectParameterType)
}
|
conditional_block
|
value.rs
|
Wrapper>,
}
impl Default for Set {
fn default() -> Self {
Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
}
}
}
impl Set {
pub fn empty() -> Value {
Value::new(Set::default())
}
pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
let mut result = Self::default();
for v in values.into_iter() {
result
.content
.insert_if_absent(ValueWrapper::new(v.into())?);
}
Ok(Value::new(result))
}
pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
let v = v.clone_for_container_value(set)?;
Self::mutate(set, &|hashset| {
hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
Ok(Value::from(None))
})
}
pub fn mutate(
v: &Value,
f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
) -> ValueResult {
if v.get_type() != "set" {
Err(ValueError::IncorrectParameterType)
} else {
let mut v = v.clone();
v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
x.mutability.test()?;
f(&mut x.content)
})
}
}
pub fn compare<Return>(
v1: &Value,
v2: &Value,
f: &Fn(
&LinkedHashSet<ValueWrapper>,
&LinkedHashSet<ValueWrapper>,
) -> Result<Return, ValueError>,
) -> Result<Return, ValueError> {
if v1.get_type() != "set" || v2.get_type() != "set" {
Err(ValueError::IncorrectParameterType)
} else {
v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
}
}
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while !self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult
|
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result))
} else {
Err(ValueError::IncorrectParameterType)
}
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[
|
{
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
}
|
identifier_body
|
value.rs
|
Wrapper>,
}
impl Default for Set {
fn
|
() -> Self {
Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
}
}
}
impl Set {
pub fn empty() -> Value {
Value::new(Set::default())
}
pub fn from<V: Into<Value>>(values: Vec<V>) -> Result<Value, ValueError> {
let mut result = Self::default();
for v in values.into_iter() {
result
.content
.insert_if_absent(ValueWrapper::new(v.into())?);
}
Ok(Value::new(result))
}
pub fn insert_if_absent(set: &Value, v: Value) -> Result<Value, ValueError> {
let v = v.clone_for_container_value(set)?;
Self::mutate(set, &|hashset| {
hashset.insert_if_absent(ValueWrapper::new(v.clone())?);
Ok(Value::from(None))
})
}
pub fn mutate(
v: &Value,
f: &Fn(&mut LinkedHashSet<ValueWrapper>) -> ValueResult,
) -> ValueResult {
if v.get_type() != "set" {
Err(ValueError::IncorrectParameterType)
} else {
let mut v = v.clone();
v.downcast_apply_mut(|x: &mut Set| -> ValueResult {
x.mutability.test()?;
f(&mut x.content)
})
}
}
pub fn compare<Return>(
v1: &Value,
v2: &Value,
f: &Fn(
&LinkedHashSet<ValueWrapper>,
&LinkedHashSet<ValueWrapper>,
) -> Result<Return, ValueError>,
) -> Result<Return, ValueError> {
if v1.get_type() != "set" || v2.get_type() != "set" {
Err(ValueError::IncorrectParameterType)
} else {
v1.downcast_apply(|v1: &Set| v2.downcast_apply(|v2: &Set| f(&v1.content, &v2.content)))
}
}
}
impl TypedValue for Set {
any!();
define_iterable_mutability!(mutability);
fn freeze(&mut self) {
self.mutability.freeze();
let mut new = LinkedHashSet::with_capacity(self.content.len());
while !self.content.is_empty() {
let mut value = self.content.pop_front().unwrap();
value.value.borrow_mut().freeze();
new.insert(value);
}
self.content = new;
}
/// Returns a string representation for the set
///
/// # Examples:
/// ```
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// assert_eq!("[1, 2, 3]", Value::from(vec![1, 2, 3]).to_str());
/// assert_eq!("[1, [2, 3]]",
/// Value::from(vec![Value::from(1), Value::from(vec![2, 3])]).to_str());
/// assert_eq!("[1]", Value::from(vec![1]).to_str());
/// assert_eq!("[]", Value::from(Vec::<i64>::new()).to_str());
/// ```
fn to_str(&self) -> String {
format!(
"{{{}}}",
self.content
.iter()
.map(|x| x.value.to_repr(),)
.enumerate()
.fold("".to_string(), |accum, s| if s.0 == 0 {
accum + &s.1
} else {
accum + ", " + &s.1
},)
)
}
fn to_repr(&self) -> String {
self.to_str()
}
not_supported!(to_int);
fn get_type(&self) -> &'static str {
"set"
}
fn to_bool(&self) -> bool {
!self.content.is_empty()
}
fn compare(&self, other: &TypedValue, _recursion: u32) -> Result<Ordering, ValueError> {
if other.get_type() == "set" {
let other = other.as_any().downcast_ref::<Self>().unwrap();
if self
.content
.symmetric_difference(&other.content)
.next()
.is_none()
{
return Ok(Ordering::Equal);
}
// Comparing based on hash value isn't particularly meaningful to users, who may expect
// sets to compare based on, say, their size, or comparing their elements.
// We do this because it's guaranteed to provide a consistent ordering for any pair of
// sets. We should consider better defining the sort order of sets if users complain.
let l = self.get_hash().unwrap();
let r = other.get_hash().unwrap();
if l <= r {
Ok(Ordering::Less)
} else {
Ok(Ordering::Greater)
}
} else {
default_compare(self, other)
}
}
fn at(&self, index: Value) -> ValueResult {
let i = index.convert_index(self.length()?)? as usize;
let to_skip = if i == 0 { 0 } else { i - 1 };
Ok(self.content.iter().nth(to_skip).unwrap().value.clone())
}
fn length(&self) -> Result<i64, ValueError> {
Ok(self.content.len() as i64)
}
fn is_in(&self, other: &Value) -> ValueResult {
Ok(Value::new(
self.content.contains(&ValueWrapper::new(other.clone())?),
))
}
fn is_descendant(&self, other: &TypedValue) -> bool {
self.content
.iter()
.any(|x| x.value.same_as(other) || x.value.is_descendant(other))
}
fn slice(
&self,
start: Option<Value>,
stop: Option<Value>,
stride: Option<Value>,
) -> ValueResult {
let (start, stop, stride) =
Value::convert_slice_indices(self.length()?, start, stop, stride)?;
Ok(Value::from(tuple::slice_vector(
start,
stop,
stride,
self.content.iter().map(|v| &v.value),
)))
}
fn iter<'a>(&'a self) -> Result<Box<Iterator<Item = Value> + 'a>, ValueError> {
Ok(Box::new(self.content.iter().map(|x| x.value.clone())))
}
/// Concatenate `other` to the current value.
///
/// `other` has to be a set.
///
/// # Example
///
/// ```rust
/// # use starlark::values::*;
/// # use starlark::values::list::List;
/// # assert!(
/// // {1, 2, 3} + {2, 3, 4} == {1, 2, 3, 4}
/// Value::from(vec![1,2,3]).add(Value::from(vec![2,3])).unwrap()
/// == Value::from(vec![1, 2, 3, 2, 3])
/// # );
/// ```
fn add(&self, other: Value) -> ValueResult {
if other.get_type() == "set" {
let mut result = Set {
mutability: IterableMutability::Mutable,
content: LinkedHashSet::new(),
};
for x in self.content.iter() {
result.content.insert(x.clone());
}
for x in other.iter()? {
result
.content
.insert_if_absent(ValueWrapper::new(x.clone())?);
}
Ok(Value::new(result))
} else {
Err(ValueError::IncorrectParameterType)
}
}
fn get_hash(&self) -> Result<u64, ValueError> {
Ok(self
.content
.iter()
.map(|v| v.precomputed_hash)
.map(Wrapping)
.fold(Wrapping(0_u64), |acc, v| acc + v)
.0)
}
not_supported!(mul, set_at);
not_supported!(attr, function);
not_supported!(plus, minus, sub, div, pipe, percent, floor_div);
}
#[derive(Clone)]
pub struct ValueWrapper {
pub value: Value,
// Precompute the hash to verify that the value is hashable. Eagerly error if it's not, so that
// the caller who wants to use the ValueWrapper knows it can't be done.
precomputed_hash: u64,
}
impl ValueWrapper {
pub fn new(value: Value) -> Result<ValueWrapper, ValueError> {
let precomputed_hash = value.get_hash()?;
Ok(ValueWrapper {
value,
precomputed_hash,
})
}
}
impl Into<Value> for &ValueWrapper {
fn into(self) -> Value {
self.clone().value
}
}
impl PartialEq for ValueWrapper {
fn eq(&self, other: &ValueWrapper) -> bool {
self.value.compare(&other.value, 0) == Ok(Ordering::Equal)
}
}
impl Eq for ValueWrapper {}
impl Hash for ValueWrapper {
fn hash<H: Hasher>(&self, h: &mut H) {
h.write_u64(self.precomputed_hash);
}
}
impl Into<Value> for ValueWrapper {
fn into(self) -> Value {
self.value
}
}
#[cfg
|
default
|
identifier_name
|
dsp.py
|
self.fft_plot_filter = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.5, alpha_rise=0.99)
self.mel_gain = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.01, alpha_rise=0.99)
self.mel_smoothing = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.5, alpha_rise=0.99)
self.gain = ExpFilter(np.tile(0.01, n_fft_bins), alpha_decay=0.001, alpha_rise=0.99)
self.r_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.2, alpha_rise=0.99)
self.g_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.05, alpha_rise=0.3)
self.b_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.1, alpha_rise=0.5)
self.common_mode = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.99, alpha_rise=0.01)
self.p_filt = ExpFilter(np.tile(1, (3, led_count // 2)), alpha_decay=0.1, alpha_rise=0.99)
self.volume = ExpFilter(min_volume_threshold, alpha_decay=0.02, alpha_rise=0.02)
self.p = np.tile(1.0, (3, led_count // 2))
# Number of audio samples to read every time frame.
# self.samples_per_frame = int(default_sample_rate / fps)
self.samples_per_frame = int(frames_per_buffer)
# Array containing the rolling audio sample window.
self.y_roll = np.random.rand(n_rolling_history, self.samples_per_frame) / 1e16
self.fft_window = np.hamming(int(frames_per_buffer) * n_rolling_history)
self.samples = None
self.mel_y = None
self.mel_x = None
self.melbank = Melbank()
self.create_mel_bank()
def update(self, audio_samples):
"""
Return processed audio data.
Returns mel curve, x/y data.
This method is called every time there is a microphone update.
Returns:
-------
audio_data: dict
Dict containing "mel", "vol", "x", and "y".
"""
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
audio_data = {}
# Normalize int16 samples to the range [-1, 1].
y = audio_samples / 2.0**15
# Construct a rolling window of audio samples.
self.y_roll[:-1] = self.y_roll[1:]
self.y_roll[-1, :] = np.copy(y)
y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)
vol = np.max(np.abs(y_data))
# Transform audio input into the frequency domain.
N = len(y_data)
N_zeros = 2**int(np.ceil(np.log2(N))) - N
# Pad with zeros until the next power of two.
y_data *= self.fft_window
y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
# Construct a Mel filterbank from the FFT data.
mel = np.atleast_2d(YS).T * self.mel_y.T
# Scale data to values more suitable for visualization.
mel = np.sum(mel, axis=0)
mel = mel**2.0
# Gain normalization.
self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
mel /= self.mel_gain.value
mel = self.mel_smoothing.update(mel)
x = np.linspace(min_frequency, max_frequency, len(mel))
y = self.fft_plot_filter.update(mel)
audio_data["mel"] = mel
audio_data["vol"] = vol
audio_data["x"] = x
audio_data["y"] = y
return audio_data
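# Illustrative sketch (not part of dsp.py): update() above zero-pads the windowed
# samples up to the next power of two before the FFT; this hypothetical helper shows
# the same computation as the N_zeros line (it assumes numpy is imported as np, as
# elsewhere in this file).
def _pad_to_next_pow2(y):
    n = len(y)
    n_zeros = 2 ** int(np.ceil(np.log2(n))) - n  # zeros needed to reach the next power of two
    return np.pad(y, (0, n_zeros), mode='constant')
# _pad_to_next_pow2(np.zeros(3000)) has length 4096 (= 2**12)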
def rfft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.abs(np.fft.rfft(data * window))
xs = np.fft.rfftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def fft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.fft.fft(data * window)
xs = np.fft.fftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def create_mel_bank(self):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
frames_per_buffer = self._config["general_settings"]["frames_per_buffer"]
n_rolling_history = self._config["general_settings"]["n_rolling_history"]
n_fft_bins = self._config["general_settings"]["n_fft_bins"]
samples = int(frames_per_buffer * (n_rolling_history / 2))
self.mel_y, (_, self.mel_x) = self.melbank.compute_melmat(
num_mel_bands=n_fft_bins,
freq_min=min_frequency,
freq_max=max_frequency,
num_fft_bands=samples,
sample_rate=default_sample_rate
)
class ExpFilter():
"""Simple exponential smoothing filter."""
def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
"""Small rise/decay factors = more smoothing."""
assert 0.0 < alpha_decay < 1.0, 'Invalid decay smoothing factor.'
assert 0.0 < alpha_rise < 1.0, 'Invalid rise smoothing factor.'
self.alpha_decay = alpha_decay
self.alpha_rise = alpha_rise
self.value = val
def update(self, value):
if isinstance(self.value, (list, np.ndarray, tuple)):
alpha = value - self.value
alpha[alpha > 0.0] = self.alpha_rise
alpha[alpha <= 0.0] = self.alpha_decay
else:
alpha = self.alpha_rise if value > self.value else self.alpha_decay
self.value = alpha * value + (1.0 - alpha) * self.value
return self.value
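# Illustrative sketch (not part of dsp.py): ExpFilter smooths asymmetrically, so the
# value tracks rises quickly (alpha_rise) while decaying slowly (alpha_decay).
def _demo_exp_filter():
    f = ExpFilter(0.0, alpha_decay=0.1, alpha_rise=0.9)
    assert abs(f.update(1.0) - 0.9) < 1e-12    # rise: 0.9 * 1.0 + 0.1 * 0.0
    assert abs(f.update(0.0) - 0.81) < 1e-12   # decay: 0.1 * 0.0 + 0.9 * 0.9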
class Melbank():
"""This class implements a Mel Filter Bank.
In other words it is a filter bank with triangular shaped bands
arranged on the mel frequency scale.
An example is shown in the following figure:
.. plot::
from pylab import plt
import melbank
f1, f2 = 1000, 8000
melmat, (melfreq, fftfreq) = melbank.compute_melmat(6, f1, f2, num_fft_bands=4097)
fig, ax = plt.subplots(figsize=(8, 3))
ax.plot(fftfreq, melmat.T)
ax.grid(True)
ax.set_ylabel('Weight')
ax.set_xlabel('Frequency / Hz')
ax.set_xlim((f1, f2))
ax2 = ax.twiny()
ax2.xaxis.set_ticks_position('top')
ax2.set_xlim((f1, f2))
ax2.xaxis.set_ticks(melbank.mel_to_hertz(melfreq))
ax2.xaxis.set_ticklabels(['{:.0f}'.format(mf) for mf in melfreq])
ax2.set_xlabel('Frequency / mel')
plt.tight_layout()
fig, ax = plt.subplots()
ax.matshow(melmat)
plt.axis('equal')
plt.axis('tight')
plt.title('Mel Matrix')
plt.tight_layout()
Functions
---------
"""
def hertz_to_mel(self, freq):
"""
Returns mel-frequency from linear frequency input.
Parameter
---------
freq : scalar or ndarray
Frequency value or array in Hz.
Returns
-------
mel : scalar or ndarray
Mel-frequency value or ndarray in Mel
"""
return 3340.0 * log(1 + (freq / 250.0), 9)
def mel_to_hertz(self, mel):
"""
Returns frequency from mel-frequency input.
Parameter
---------
mel : scalar or ndarray
Mel-frequency value or ndarray in Mel
Returns
-------
freq : scalar or ndarray
Frequency value or array in Hz.
"""
# return 700.0 * (10**(mel / 2595.0)) - 700.0
return
|
led_count = self._device_config["led_count"]
|
conditional_block
|
|
dsp.py
|
_decay=0.001, alpha_rise=0.99)
self.r_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.2, alpha_rise=0.99)
self.g_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.05, alpha_rise=0.3)
self.b_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.1, alpha_rise=0.5)
self.common_mode = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.99, alpha_rise=0.01)
self.p_filt = ExpFilter(np.tile(1, (3, led_count // 2)), alpha_decay=0.1, alpha_rise=0.99)
self.volume = ExpFilter(min_volume_threshold, alpha_decay=0.02, alpha_rise=0.02)
self.p = np.tile(1.0, (3, led_count // 2))
# Number of audio samples to read every time frame.
# self.samples_per_frame = int(default_sample_rate / fps)
self.samples_per_frame = int(frames_per_buffer)
# Array containing the rolling audio sample window.
self.y_roll = np.random.rand(n_rolling_history, self.samples_per_frame) / 1e16
self.fft_window = np.hamming(int(frames_per_buffer) * n_rolling_history)
self.samples = None
self.mel_y = None
self.mel_x = None
self.melbank = Melbank()
self.create_mel_bank()
def update(self, audio_samples):
"""
Return processed audio data.
Returns mel curve, x/y data.
This method is called every time there is a microphone update.
Returns:
-------
audio_data: dict
Dict containing "mel", "vol", "x", and "y".
"""
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
audio_data = {}
# Normalize int16 samples to the range [-1, 1].
y = audio_samples / 2.0**15
# Construct a rolling window of audio samples.
self.y_roll[:-1] = self.y_roll[1:]
self.y_roll[-1, :] = np.copy(y)
y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)
vol = np.max(np.abs(y_data))
# Transform audio input into the frequency domain.
N = len(y_data)
N_zeros = 2**int(np.ceil(np.log2(N))) - N
# Pad with zeros until the next power of two.
y_data *= self.fft_window
y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
# Construct a Mel filterbank from the FFT data.
mel = np.atleast_2d(YS).T * self.mel_y.T
# Scale data to values more suitable for visualization.
mel = np.sum(mel, axis=0)
mel = mel**2.0
# Gain normalization.
self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
mel /= self.mel_gain.value
mel = self.mel_smoothing.update(mel)
x = np.linspace(min_frequency, max_frequency, len(mel))
y = self.fft_plot_filter.update(mel)
audio_data["mel"] = mel
audio_data["vol"] = vol
audio_data["x"] = x
audio_data["y"] = y
return audio_data
def rfft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.abs(np.fft.rfft(data * window))
xs = np.fft.rfftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def fft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.fft.fft(data * window)
xs = np.fft.fftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def create_mel_bank(self):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
frames_per_buffer = self._config["general_settings"]["frames_per_buffer"]
n_rolling_history = self._config["general_settings"]["n_rolling_history"]
n_fft_bins = self._config["general_settings"]["n_fft_bins"]
samples = int(frames_per_buffer * (n_rolling_history / 2))
self.mel_y, (_, self.mel_x) = self.melbank.compute_melmat(
num_mel_bands=n_fft_bins,
freq_min=min_frequency,
freq_max=max_frequency,
num_fft_bands=samples,
sample_rate=default_sample_rate
)
class ExpFilter():
"""Simple exponential smoothing filter."""
def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
"""Small rise/decay factors = more smoothing."""
assert 0.0 < alpha_decay < 1.0, 'Invalid decay smoothing factor.'
assert 0.0 < alpha_rise < 1.0, 'Invalid rise smoothing factor.'
self.alpha_decay = alpha_decay
self.alpha_rise = alpha_rise
self.value = val
def update(self, value):
if isinstance(self.value, (list, np.ndarray, tuple)):
alpha = value - self.value
alpha[alpha > 0.0] = self.alpha_rise
alpha[alpha <= 0.0] = self.alpha_decay
else:
alpha = self.alpha_rise if value > self.value else self.alpha_decay
self.value = alpha * value + (1.0 - alpha) * self.value
return self.value
class Melbank():
"""This class implements a Mel Filter Bank.
In other words it is a filter bank with triangular shaped bands
arranged on the mel frequency scale.
An example is shown in the following figure:
.. plot::
from pylab import plt
import melbank
f1, f2 = 1000, 8000
melmat, (melfreq, fftfreq) = melbank.compute_melmat(6, f1, f2, num_fft_bands=4097)
fig, ax = plt.subplots(figsize=(8, 3))
ax.plot(fftfreq, melmat.T)
ax.grid(True)
ax.set_ylabel('Weight')
ax.set_xlabel('Frequency / Hz')
ax.set_xlim((f1, f2))
ax2 = ax.twiny()
ax2.xaxis.set_ticks_position('top')
ax2.set_xlim((f1, f2))
ax2.xaxis.set_ticks(melbank.mel_to_hertz(melfreq))
ax2.xaxis.set_ticklabels(['{:.0f}'.format(mf) for mf in melfreq])
ax2.set_xlabel('Frequency / mel')
plt.tight_layout()
fig, ax = plt.subplots()
ax.matshow(melmat)
plt.axis('equal')
plt.axis('tight')
plt.title('Mel Matrix')
plt.tight_layout()
Functions
---------
"""
def hertz_to_mel(self, freq):
"""
Returns mel-frequency from linear frequency input.
Parameter
---------
freq : scalar or ndarray
Frequency value or array in Hz.
Returns
-------
mel : scalar or ndarray
Mel-frequency value or ndarray in Mel
"""
return 3340.0 * log(1 + (freq / 250.0), 9)
def mel_to_hertz(self, mel):
"""
Returns frequency from mel-frequency input.
Parameter
---------
mel : scalar or ndarray
Mel-frequency value or ndarray in Mel
Returns
-------
freq : scalar or ndarray
Frequency value or array in Hz.
"""
# return 700.0 * (10**(mel / 2595.0)) - 700.0
return 250.0 * (9**(mel / 3340.0)) - 250.0
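# Illustrative sketch (not part of dsp.py): mel_to_hertz is the exact inverse of the
# custom scale used in hertz_to_mel above. For example, for f = 1000 Hz:
#     hertz_to_mel(1000.0) = 3340 * log9(1 + 1000/250) = 3340 * log9(5) ≈ 2446.5 mel
#     mel_to_hertz(2446.5) = 250 * 9**(2446.5/3340) - 250 ≈ 1000 Hz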
def
|
(self, num_bands, freq_min, freq_max, num_fft_bands):
"""
Returns centerfrequencies and band edges for a mel filter bank
Parameters
----------
num_bands : int
Number of mel bands.
freq_min : scalar
Minimum frequency for the first band.
freq_max : scalar
Maximum frequency for the last band.
num_fft_bands : int
Number of fft bands.
Returns
-------
center_frequencies_mel
|
melfrequencies_mel_filterbank
|
identifier_name
|
dsp.py
|
self.b_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.1, alpha_rise=0.5)
self.common_mode = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.99, alpha_rise=0.01)
self.p_filt = ExpFilter(np.tile(1, (3, led_count // 2)), alpha_decay=0.1, alpha_rise=0.99)
self.volume = ExpFilter(min_volume_threshold, alpha_decay=0.02, alpha_rise=0.02)
self.p = np.tile(1.0, (3, led_count // 2))
# Number of audio samples to read every time frame.
# self.samples_per_frame = int(default_sample_rate / fps)
self.samples_per_frame = int(frames_per_buffer)
# Array containing the rolling audio sample window.
self.y_roll = np.random.rand(n_rolling_history, self.samples_per_frame) / 1e16
self.fft_window = np.hamming(int(frames_per_buffer) * n_rolling_history)
self.samples = None
self.mel_y = None
self.mel_x = None
self.melbank = Melbank()
self.create_mel_bank()
def update(self, audio_samples):
"""
Return processed audio data.
Returns mel curve, x/y data.
This method is called every time there is a microphone update.
Returns:
-------
audio_data: dict
Dict containing "mel", "vol", "x", and "y".
"""
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
audio_data = {}
# Normalize int16 samples to the range [-1, 1].
y = audio_samples / 2.0**15
# Construct a rolling window of audio samples.
self.y_roll[:-1] = self.y_roll[1:]
self.y_roll[-1, :] = np.copy(y)
y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)
vol = np.max(np.abs(y_data))
# Transform audio input into the frequency domain.
N = len(y_data)
N_zeros = 2**int(np.ceil(np.log2(N))) - N
# Pad with zeros until the next power of two.
y_data *= self.fft_window
y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
# Apply the Mel filterbank to the FFT magnitudes.
mel = np.atleast_2d(YS).T * self.mel_y.T
# Scale data to values more suitable for visualization.
mel = np.sum(mel, axis=0)
mel = mel**2.0
# Gain normalization.
self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
mel /= self.mel_gain.value
mel = self.mel_smoothing.update(mel)
x = np.linspace(min_frequency, max_frequency, len(mel))
y = self.fft_plot_filter.update(mel)
audio_data["mel"] = mel
audio_data["vol"] = vol
audio_data["x"] = x
audio_data["y"] = y
return audio_data
def rfft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.abs(np.fft.rfft(data * window))
xs = np.fft.rfftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def fft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.fft.fft(data * window)
xs = np.fft.fftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def create_mel_bank(self):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
frames_per_buffer = self._config["general_settings"]["frames_per_buffer"]
n_rolling_history = self._config["general_settings"]["n_rolling_history"]
n_fft_bins = self._config["general_settings"]["n_fft_bins"]
samples = int(frames_per_buffer * (n_rolling_history / 2))
self.mel_y, (_, self.mel_x) = self.melbank.compute_melmat(
num_mel_bands=n_fft_bins,
freq_min=min_frequency,
freq_max=max_frequency,
num_fft_bands=samples,
sample_rate=default_sample_rate
)
class ExpFilter():
"""Simple exponential smoothing filter."""
def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
"""Small rise/decay factors = more smoothing."""
assert 0.0 < alpha_decay < 1.0, 'Invalid decay smoothing factor.'
assert 0.0 < alpha_rise < 1.0, 'Invalid rise smoothing factor.'
self.alpha_decay = alpha_decay
self.alpha_rise = alpha_rise
self.value = val
def update(self, value):
if isinstance(self.value, (list, np.ndarray, tuple)):
alpha = value - self.value
alpha[alpha > 0.0] = self.alpha_rise
alpha[alpha <= 0.0] = self.alpha_decay
else:
alpha = self.alpha_rise if value > self.value else self.alpha_decay
self.value = alpha * value + (1.0 - alpha) * self.value
return self.value
class Melbank():
"""This class implements a Mel Filter Bank.
In other words it is a filter bank with triangular shaped bands
arranged on the mel frequency scale.
An example is shown in the following figure:
.. plot::
from pylab import plt
import melbank
f1, f2 = 1000, 8000
melmat, (melfreq, fftfreq) = melbank.compute_melmat(6, f1, f2, num_fft_bands=4097)
fig, ax = plt.subplots(figsize=(8, 3))
ax.plot(fftfreq, melmat.T)
ax.grid(True)
ax.set_ylabel('Weight')
ax.set_xlabel('Frequency / Hz')
ax.set_xlim((f1, f2))
ax2 = ax.twiny()
ax2.xaxis.set_ticks_position('top')
ax2.set_xlim((f1, f2))
ax2.xaxis.set_ticks(melbank.mel_to_hertz(melfreq))
ax2.xaxis.set_ticklabels(['{:.0f}'.format(mf) for mf in melfreq])
ax2.set_xlabel('Frequency / mel')
plt.tight_layout()
fig, ax = plt.subplots()
ax.matshow(melmat)
plt.axis('equal')
plt.axis('tight')
plt.title('Mel Matrix')
plt.tight_layout()
Functions
---------
"""
def hertz_to_mel(self, freq):
"""
Returns mel-frequency from linear frequency input.
Parameter
---------
freq : scalar or ndarray
Frequency value or array in Hz.
Returns
-------
mel : scalar or ndarray
Mel-frequency value or ndarray in
|
self._config = config
self._device_config = device_config
# Initialise filters etc. I've no idea what most of these are for but I imagine I won't be getting rid of them soon.
n_fft_bins = self._config["general_settings"]["n_fft_bins"]
min_volume_threshold = self._config["general_settings"]["min_volume_threshold"]
frames_per_buffer = self._config["general_settings"]["frames_per_buffer"]
n_rolling_history = self._config["general_settings"]["n_rolling_history"]
if device_config is None:
led_count = 200
else:
led_count = self._device_config["led_count"]
self.fft_plot_filter = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.5, alpha_rise=0.99)
self.mel_gain = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.01, alpha_rise=0.99)
self.mel_smoothing = ExpFilter(np.tile(1e-1, n_fft_bins), alpha_decay=0.5, alpha_rise=0.99)
self.gain = ExpFilter(np.tile(0.01, n_fft_bins), alpha_decay=0.001, alpha_rise=0.99)
self.r_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.2, alpha_rise=0.99)
self.g_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.05, alpha_rise=0.3)
|
identifier_body
|
|
dsp.py
|
_decay=0.001, alpha_rise=0.99)
self.r_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.2, alpha_rise=0.99)
self.g_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.05, alpha_rise=0.3)
self.b_filt = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.1, alpha_rise=0.5)
self.common_mode = ExpFilter(np.tile(0.01, led_count // 2), alpha_decay=0.99, alpha_rise=0.01)
self.p_filt = ExpFilter(np.tile(1, (3, led_count // 2)), alpha_decay=0.1, alpha_rise=0.99)
self.volume = ExpFilter(min_volume_threshold, alpha_decay=0.02, alpha_rise=0.02)
self.p = np.tile(1.0, (3, led_count // 2))
# Number of audio samples to read every time frame.
# self.samples_per_frame = int(default_sample_rate / fps)
self.samples_per_frame = int(frames_per_buffer)
# Array containing the rolling audio sample window.
self.y_roll = np.random.rand(n_rolling_history, self.samples_per_frame) / 1e16
self.fft_window = np.hamming(int(frames_per_buffer) * n_rolling_history)
self.samples = None
self.mel_y = None
self.mel_x = None
self.melbank = Melbank()
self.create_mel_bank()
def update(self, audio_samples):
"""
Return processed audio data.
Returns mel curve, x/y data.
This method is called every time there is a microphone update.
Returns:
-------
audio_data: dict
Dict containing "mel", "vol", "x", and "y".
"""
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
audio_data = {}
# Normalize int16 samples to the range [-1, 1].
y = audio_samples / 2.0**15
# Construct a rolling window of audio samples.
self.y_roll[:-1] = self.y_roll[1:]
self.y_roll[-1, :] = np.copy(y)
y_data = np.concatenate(self.y_roll, axis=0).astype(np.float32)
vol = np.max(np.abs(y_data))
# Transform audio input into the frequency domain.
N = len(y_data)
N_zeros = 2**int(np.ceil(np.log2(N))) - N
# Pad with zeros until the next power of two.
y_data *= self.fft_window
y_padded = np.pad(y_data, (0, N_zeros), mode='constant')
YS = np.abs(np.fft.rfft(y_padded)[:N // 2])
# Apply the Mel filterbank to the FFT magnitudes.
mel = np.atleast_2d(YS).T * self.mel_y.T
# Scale data to values more suitable for visualization.
mel = np.sum(mel, axis=0)
mel = mel**2.0
# Gain normalization.
self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
mel /= self.mel_gain.value
mel = self.mel_smoothing.update(mel)
x = np.linspace(min_frequency, max_frequency, len(mel))
y = self.fft_plot_filter.update(mel)
audio_data["mel"] = mel
audio_data["vol"] = vol
audio_data["x"] = x
audio_data["y"] = y
return audio_data
def rfft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.abs(np.fft.rfft(data * window))
xs = np.fft.rfftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def fft(self, data, window=None):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
window = 1.0 if window is None else window(len(data))
ys = np.fft.fft(data * window)
xs = np.fft.fftfreq(len(data), 1.0 / default_sample_rate)
return xs, ys
def create_mel_bank(self):
default_sample_rate = self._config["general_settings"]["default_sample_rate"]
min_frequency = self._config["general_settings"]["min_frequency"]
max_frequency = self._config["general_settings"]["max_frequency"]
frames_per_buffer = self._config["general_settings"]["frames_per_buffer"]
n_rolling_history = self._config["general_settings"]["n_rolling_history"]
n_fft_bins = self._config["general_settings"]["n_fft_bins"]
samples = int(frames_per_buffer * (n_rolling_history / 2))
self.mel_y, (_, self.mel_x) = self.melbank.compute_melmat(
num_mel_bands=n_fft_bins,
freq_min=min_frequency,
freq_max=max_frequency,
num_fft_bands=samples,
sample_rate=default_sample_rate
)
class ExpFilter():
"""Simple exponential smoothing filter."""
def __init__(self, val=0.0, alpha_decay=0.5, alpha_rise=0.5):
"""Small rise/decay factors = more smoothing."""
assert 0.0 < alpha_decay < 1.0, 'Invalid decay smoothing factor.'
assert 0.0 < alpha_rise < 1.0, 'Invalid rise smoothing factor.'
self.alpha_decay = alpha_decay
self.alpha_rise = alpha_rise
self.value = val
def update(self, value):
if isinstance(self.value, (list, np.ndarray, tuple)):
alpha = value - self.value
alpha[alpha > 0.0] = self.alpha_rise
alpha[alpha <= 0.0] = self.alpha_decay
else:
alpha = self.alpha_rise if value > self.value else self.alpha_decay
self.value = alpha * value + (1.0 - alpha) * self.value
return self.value
class Melbank():
|
An example is shown in the following figure:
.. plot::
from pylab import plt
import melbank
f1, f2 = 1000, 8000
melmat, (melfreq, fftfreq) = melbank.compute_melmat(6, f1, f2, num_fft_bands=4097)
fig, ax = plt.subplots(figsize=(8, 3))
ax.plot(fftfreq, melmat.T)
ax.grid(True)
ax.set_ylabel('Weight')
ax.set_xlabel('Frequency / Hz')
ax.set_xlim((f1, f2))
ax2 = ax.twiny()
ax2.xaxis.set_ticks_position('top')
ax2.set_xlim((f1, f2))
ax2.xaxis.set_ticks(melbank.mel_to_hertz(melfreq))
ax2.xaxis.set_ticklabels(['{:.0f}'.format(mf) for mf in melfreq])
ax2.set_xlabel('Frequency / mel')
plt.tight_layout()
fig, ax = plt.subplots()
ax.matshow(melmat)
plt.axis('equal')
plt.axis('tight')
plt.title('Mel Matrix')
plt.tight_layout()
Functions
---------
"""
def hertz_to_mel(self, freq):
"""
Returns mel-frequency from linear frequency input.
Parameter
---------
freq : scalar or ndarray
Frequency value or array in Hz.
Returns
-------
mel : scalar or ndarray
Mel-frequency value or ndarray in Mel
"""
return 3340.0 * log(1 + (freq / 250.0), 9)
def mel_to_hertz(self, mel):
"""
Returns frequency from mel-frequency input.
Parameter
---------
mel : scalar or ndarray
Mel-frequency value or ndarray in Mel
Returns
-------
freq : scalar or ndarray
Frequency value or array in Hz.
"""
# return 700.0 * (10**(mel / 2595.0)) - 700.0
return 250.0 * (9**(mel / 3340.0)) - 250.0
def melfrequencies_mel_filterbank(self, num_bands, freq_min, freq_max, num_fft_bands):
"""
Returns center frequencies and band edges for a mel filter bank.
Parameters
----------
num_bands : int
Number of mel bands.
freq_min : scalar
Minimum frequency for the first band.
freq_max : scalar
Maximum frequency for the last band.
num_fft_bands : int
Number of fft bands.
Returns
-------
center_frequencies_mel :
|
"""This class implements a Mel Filter Bank.
In other words it is a filter bank with triangular shaped bands
arranged on the mel frequency scale.
|
random_line_split
|
taskGraph.py
|
yaml.constructor.yaml_constructors[
u'tag:yaml.org,2002:timestamp'] = \
yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:str']
obj = yaml.load(f)
t = TaskGraph(obj)
return t
def export_task_speclist(self):
tlist_od = [] # task list ordered
for task in self:
tod = OrderedDict([(TaskSpecSchema.task_id, 'idholder'),
(TaskSpecSchema.node_type, 'typeholder'),
(TaskSpecSchema.conf, 'confholder'),
(TaskSpecSchema.inputs, 'inputsholder')
])
tod.update(task._task_spec)
if not isinstance(tod[TaskSpecSchema.node_type], str):
tod[TaskSpecSchema.node_type] = tod[
TaskSpecSchema.node_type].__name__
tlist_od.append(tod)
return tlist_od
def save_taskgraph(self, filename):
"""
Write a list of tasks i.e. taskgraph to a yaml file.
Arguments
-------
filename: str
The filename to write a yaml file to.
"""
if not TaskGraph.__SETUP_YAML_ONCE:
TaskGraph.setup_yaml()
# we want -id to be first in the resulting yaml file.
tlist_od = self.export_task_speclist()
with open(filename, 'w') as fh:
ruamel.yaml.dump(tlist_od, fh, default_flow_style=False)
def viz_graph(self, show_ports=False, pydot_options=None):
"""
Generate the visualization of the graph in JupyterLab
Returns
-----
nx.DiGraph
"""
import networkx as nx
G = nx.DiGraph()
if pydot_options:
G.graph['graph'] = pydot_options
# instantiate objects
for itask in self:
task_inputs = itask[TaskSpecSchema.inputs]
to_task = itask[TaskSpecSchema.task_id]
to_type = itask[TaskSpecSchema.node_type]
if to_task == "":
to_task = OUTPUT_TYPE
for iport_or_tid in task_inputs:
# iport_or_tid: it is either to_port or task id (tid) b/c
# if using ports API task_inputs is a dictionary otherwise
# task_inputs is a list.
taskin_and_oport = task_inputs[iport_or_tid] \
if isinstance(task_inputs, dict) else iport_or_tid
isplit = taskin_and_oport.split('.')
from_task = isplit[0]
from_port = isplit[1] if len(isplit) > 1 else None
if show_ports and from_port is not None:
to_port = iport_or_tid
common_tip = taskin_and_oport
G.add_edge(from_task, common_tip, label=from_port)
G.add_edge(common_tip, to_task, label=to_port)
tnode = G.nodes[common_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
else:
G.add_edge(from_task, to_task)
# draw output ports
if show_ports:
if (to_type == OUTPUT_TYPE):
continue
task_node = get_node_obj(itask, tgraph_mixin=True)
# task_outputs = itask.get(TaskSpecSchema.outputs, [])
for pout in task_node._get_output_ports():
out_tip = '{}.{}'.format(
itask[TaskSpecSchema.task_id], pout)
G.add_edge(to_task, out_tip, label=pout)
tnode = G.nodes[out_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
return G
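# Hedged usage note (illustrative, not in the original): a typical call pattern is
#   tg = TaskGraph.load_taskgraph('my_graph.yaml')  # 'my_graph.yaml' is a placeholder
#   tg.build()
#   G = tg.viz_graph(show_ports=True)
# after which G can be drawn with networkx/pydot in JupyterLab.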
def _build(self, replace=None, profile=False):
"""
Compute the graph structure of the nodes. It will set the input and
output nodes for each node.
Arguments
-------
replace: dict
conf parameters replacement
"""
self.__node_dict.clear()
replace = dict() if replace is None else replace
# check if there are items in `replace` that are not in the graph
task_ids = set([task[TaskSpecSchema.task_id] for task in self])
for rkey in replace.keys():
if rkey not in task_ids:
warnings.warn(
'Replace task-id {} not found in task-graph'.format(rkey),
RuntimeWarning)
# instantiate node objects
for task in self:
task_id = task[TaskSpecSchema.task_id]
nodetype = task[TaskSpecSchema.node_type]
nodetype = nodetype if isinstance(nodetype, str) else \
nodetype.__name__
if nodetype == OUTPUT_TYPE:
output_task = task
node = get_node_obj(output_task, tgraph_mixin=True)
else:
node = get_node_obj(task, replace.get(task_id), profile,
tgraph_mixin=True)
self.__node_dict[task_id] = node
# build the graph
for task_id in self.__node_dict:
node = self.__node_dict[task_id]
task_inputs = node._task_obj[TaskSpecSchema.inputs]
for iport in task_inputs:
# node_inputs should be a dict with entries:
# {iport: taskid.oport}
input_task = task_inputs[iport].split('.')
dst_port = iport
input_id = input_task[0]
# src_port = input_task[1] if len(input_task) > 1 else None
src_port = input_task[1]
try:
input_node = self.__node_dict[input_id]
except KeyError:
raise LookupError(
'Missing task "{}". Add task spec to TaskGraph.'
.format(input_id))
node.inputs.append({
'from_node': input_node,
'from_port': src_port,
'to_port': dst_port
})
# input_node.outputs.append(node)
input_node.outputs.append({
'to_node': node,
'to_port': dst_port,
'from_port': src_port
})
def build(self, replace=None, profile=None):
"""
Compute the graph structure of the nodes. It will set the input and
output nodes for each node.
Arguments
-------
replace: dict
conf parameters replacement
"""
profile = False if profile is None else profile
# make connection only
self._build(replace=replace, profile=profile)
# Columns type checking is done in the :meth:`TaskGraph._run` after the
# outputs are specified and participating tasks are determined.
# this part is to update each of the node so dynamic inputs can be
# processed
self.breadth_first_update()
def breadth_first_update(self, extra_roots=[], extra_updated=set()):
"""
Do a breadth first graph traversal and update nodes.
Update each node following the causal order. Children nodes are
only added to the queue once all of their parents have been updated.
Each node is only updated once.
extra_roots and extra_updated should be empty for a normal graph. They
are used for a composite node when the graph is connected to another
graph.
"""
queue = []
updated = extra_updated
for k in self.__node_dict.keys():
if len(self.__node_dict[k].inputs) == 0:
queue.append(self.__node_dict[k])
queue.extend(extra_roots)
while (len(queue) != 0):
node_to_update = queue.pop(0)
# print('update {}'.format(node_to_update.uid))
if node_to_update not in updated:
node_to_update.update()
updated.add(node_to_update)
for element in node_to_update.outputs:
child = element['to_node']
if all([i['from_node'] in updated for i in child.inputs]):
queue.append(child)
# print('----done----')
def __str__(self):
out_str = ""
for k in self.__node_dict.keys():
out_str += k + ": " + str(self.__node_dict[k]) + "\n"
return out_str
def reset(self):
self.__node_dict.clear()
self.__task_list.clear()
self.__index = None
def register_node(self, module_name, classObj):
"""
Check `TaskGraph.register_lab_node`
"""
if self.__widget is not None:
encoded_class = get_encoded_class(classObj)
cacheCopy = copy.deepcopy(self.__widget.cache)
cacheCopy['register'] = {
"module": module_name,
"class": encoded_class
}
add_module_from_base64(module_name, encoded_class)
self.__widget.cache = cacheCopy
def _run(self, outputs=None, replace=None, profile=None, formated=False,
build=True):
replace = dict() if replace is None else replace
if build:
self.build(replace, profile)
else:
if replace:
warnings.warn(
'Replace is specified, but build is set to False. No '
'replacement will be done. Either set build=True or '
'first build with replace then call run.',
RuntimeWarning)
if profile is not None:
warnings.warn(
'Profile is specified, but build is set to False. '
'Profile will be done according to last build. '
'Alternatively either set build=True or first build with '
|
random_line_split
|
||
taskGraph.py
|
(object):
def __init__(self, values):
self.values = tuple([i[1] for i in values])
self.__keys = tuple([i[0] for i in values])
self.__dict = OrderedDict(values)
def __iter__(self):
return iter(self.values)
def __getitem__(self, key):
if isinstance(key, int):
return self.values[key]
else:
return self.__dict[key]
def __len__(self):
return len(self.values)
def __repr__(self):
return "Results"+self.__dict.__repr__()[11:]
def __str__(self):
return "Results"+self.__dict.__str__()[11:]
def __contains__(self, key):
return True if key in self.__dict else False
def get_keys(self):
return self.__keys
def formated_result(result):
import ipywidgets as widgets
from IPython.display import display
from ipywidgets import Output
outputs = [Output() for i in range(len(result))]
for i in range(len(result)):
with outputs[i]:
display(result[i])
tab = widgets.Tab()
tab.children = outputs
for i in range(len(result)):
tab.set_title(i, result.get_keys()[i])
return tab
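# Hedged usage sketch (illustrative, not in the original): given
#   result = Results([('node1.out', obj1), ('node2.out', obj2)])  # hypothetical outputs
# formated_result(result) returns an ipywidgets Tab with one output pane per key,
# titled with the keys returned by result.get_keys().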
class TaskGraph(object):
''' TaskGraph class that is used to store the graph.
'''
__SETUP_YAML_ONCE = False
@staticmethod
def setup_yaml():
'''Set up yaml so that OrderedDict is dumped preserving key order.'''
# https://stackoverflow.com/a/8661021
# https://stackoverflow.com/questions/47692094/lambda-works-
# defined-function-does-not # noqa
# represent_dict_order = lambda dumper, data: \
# dumper.represent_mapping('tag:yaml.org,2002:map', data.items())
def represent_dict_order(dumper, data):
return dumper.represent_mapping('tag:yaml.org,2002:map',
data.items())
ruamel.yaml.add_representer(OrderedDict, represent_dict_order)
TaskGraph.__SETUP_YAML_ONCE = True
def __init__(self, task_spec_list=None):
'''
:param task_spec_list: List of task-spec dicts per TaskSpecSchema.
'''
self.__task_list = {}
self.__node_dict = {}
self.__index = None
# this is server widget that this taskgraph associated with
self.__widget = None
error_msg = 'Task-id "{}" already in the task graph. Set '\
'replace=True to replace existing task with extended task.'
self.__extend(task_spec_list=task_spec_list, replace=False,
error_msg=error_msg)
def __extend(self, task_spec_list=None, replace=False, error_msg=None):
tspec_list = dict() if task_spec_list is None else task_spec_list
if error_msg is None:
error_msg = 'Task-id "{}" already in the task graph. Set '\
'replace=True to replace existing task.'
for tspec in tspec_list:
task = Task(tspec)
task_id = task[TaskSpecSchema.task_id]
if task_id in self.__task_list and not replace:
raise Exception(error_msg.format(task_id))
self.__task_list[task_id] = task
if self.__widget is not None:
self.__widget.value = self.export_task_speclist()
def extend(self, task_spec_list=None, replace=False):
'''
Add more task-spec dicts to the graph
:param task_spec_list: List of task-spec dicts per TaskSpecSchema.
'''
error_msg = 'Task-id "{}" already in the task graph. Set '\
'replace=True to replace existing task with extended task.'
self.__extend(task_spec_list=task_spec_list, replace=replace,
error_msg=error_msg)
def __contains__(self, task_id):
return True if task_id in self.__task_list else False
def __len__(self):
return len(self.__task_list)
def __iter__(self):
self.__index = 0
self.__tlist = list(self.__task_list.values())
return self
def __next__(self):
idx = self.__index
if idx is None or idx == len(self.__tlist):
self.__index = None
raise StopIteration
task = self.__tlist[idx]
self.__index = idx + 1
return task
def __getitem__(self, key):
# FIXME: This is inconsistent. Above for __contains__, __iter__, and
# __next__, the returned object is a Task instance. Here however
# the returned object is a Node instance.
if not self.__node_dict:
warnings.warn(
'Task graph internal state empty. Did you build the task '
'graph? Run ".build()"',
RuntimeWarning)
elif key not in self.__node_dict:
warnings.warn(
'Task graph missing task id "{}". Check the spelling of the '
'task id.'.format(key),
RuntimeWarning)
return self.__node_dict[key]
def __find_roots(self, node, inputs, consider_load=True):
"""
Find the root nodes that `node` depends on.
Arguments
-------
node: Node
the leaf node whose dependent root (input) nodes need to be found
inputs: list
resulting list used to collect all the root nodes
consider_load: bool
whether nodes that load from a cache file should be treated as roots (traversal stops at them)
Returns
-----
None
"""
if (node.visited):
return
node.visited = True
if len(node.inputs) == 0:
inputs.append(node)
return
if consider_load and node.load:
inputs.append(node)
return
for node_in in node.inputs:
inode = node_in['from_node']
self.__find_roots(inode, inputs, consider_load)
def start_labwidget(self):
from IPython.display import display
display(self.draw())
@staticmethod
def register_lab_node(module_name, class_obj):
"""
Register the node class for Greenflowlab. It puts the class_obj
into sys.modules under `module_name`. It registers the node
class in the JupyterLab kernel space, communicates with the
client to populate the add-nodes menus, and syncs with the
JupyterLab server space to register the node class.
The latest registered `class_obj` overwrites the old one.
Arguments
-------
module_name: str
the module name for `class_obj`. It will also be the menu name for
the node. Note: if '.' is used inside `module_name`, the client
will automatically construct hierarchical menus based on '.'
class_obj: Node
The node class that is the subclass of greenflow 'Node'. It is
usually defined dynamically so it can be registered.
Returns
-----
None
"""
global server_task_graph
if server_task_graph is None:
server_task_graph = TaskGraph()
server_task_graph.start_labwidget()
server_task_graph.register_node(module_name, class_obj)
@staticmethod
def load_taskgraph(filename):
"""
Load the yaml file into a TaskGraph object
Arguments
-------
filename: str
the filename pointing to the yaml file in the filesystem
Returns
-----
object
the TaskGraph instance
"""
with open(filename) as f:
yaml = ruamel.yaml.YAML(typ='safe')
yaml.constructor.yaml_constructors[
u'tag:yaml.org,2002:timestamp'] = \
yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:str']
obj = yaml.load(f)
t = TaskGraph(obj)
return t
def export_task_speclist(self):
tlist_od = [] # task list ordered
for task in self:
tod = OrderedDict([(TaskSpecSchema.task_id, 'idholder'),
(TaskSpecSchema.node_type, 'typeholder'),
(TaskSpecSchema.conf, 'confholder'),
(TaskSpecSchema.inputs, 'inputsholder')
])
tod.update(task._task_spec)
if not isinstance(tod[TaskSpecSchema.node_type], str):
tod[TaskSpecSchema.node_type] = tod[
TaskSpecSchema.node_type].__name__
tlist_od.append(tod)
return tlist_od
def save_taskgraph(self, filename):
"""
Write a list of tasks i.e. taskgraph to a yaml file.
Arguments
-------
filename: str
The filename to write a yaml file to.
"""
if not TaskGraph.__SETUP_YAML_ONCE:
TaskGraph.setup_yaml()
# we want -id to be first in the resulting yaml file.
tlist_od = self.export_task_speclist()
with open(filename, 'w') as fh:
ruamel.yaml.dump(tlist_od, fh, default_flow_style=False)
def viz_graph(self, show_ports=False, pydot_options=None):
"""
Generate the visualization of the graph in JupyterLab
Returns
-----
nx.DiGraph
"""
import networkx as nx
G = nx.DiGraph()
if
|
Results
|
identifier_name
|
|
taskGraph.py
|
.__tlist):
self.__index = None
raise StopIteration
task = self.__tlist[idx]
self.__index = idx + 1
return task
def __getitem__(self, key):
# FIXME: This is inconsistent. Above for __contains__, __iter__, and
# __next__, the returned object is a Task instance. Here however
# the returned object is a Node instance.
if not self.__node_dict:
warnings.warn(
'Task graph internal state empty. Did you build the task '
'graph? Run ".build()"',
RuntimeWarning)
elif key not in self.__node_dict:
warnings.warn(
'Task graph missing task id "{}". Check the spelling of the '
'task id.'.format(key),
RuntimeWarning)
return self.__node_dict[key]
def __find_roots(self, node, inputs, consider_load=True):
"""
Find the root nodes that `node` depends on.
Arguments
-------
node: Node
the leaf node whose dependent root (input) nodes need to be found
inputs: list
resulting list used to collect all the root nodes
consider_load: bool
whether nodes that load from a cache file should be treated as roots (traversal stops at them)
Returns
-----
None
"""
if (node.visited):
return
node.visited = True
if len(node.inputs) == 0:
inputs.append(node)
return
if consider_load and node.load:
inputs.append(node)
return
for node_in in node.inputs:
|
def start_labwidget(self):
from IPython.display import display
display(self.draw())
@staticmethod
def register_lab_node(module_name, class_obj):
"""
Register the node class for Greenflowlab. It puts the class_obj
into sys.modules under `module_name`. It registers the node
class in the JupyterLab kernel space, communicates with the
client to populate the add-nodes menus, and syncs with the
JupyterLab server space to register the node class.
The latest registered `class_obj` overwrites the old one.
Arguments
-------
module_name: str
the module name for `class_obj`. It will also be the menu name for
the node. Note: if '.' is used inside `module_name`, the client
will automatically construct hierarchical menus based on '.'
class_obj: Node
The node class that is the subclass of greenflow 'Node'. It is
usually defined dynamically so it can be registered.
Returns
-----
None
"""
global server_task_graph
if server_task_graph is None:
server_task_graph = TaskGraph()
server_task_graph.start_labwidget()
server_task_graph.register_node(module_name, class_obj)
@staticmethod
def load_taskgraph(filename):
"""
Load the yaml file into a TaskGraph object
Arguments
-------
filename: str
the filename pointing to the yaml file in the filesystem
Returns
-----
object
the TaskGraph instance
"""
with open(filename) as f:
yaml = ruamel.yaml.YAML(typ='safe')
yaml.constructor.yaml_constructors[
u'tag:yaml.org,2002:timestamp'] = \
yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:str']
obj = yaml.load(f)
t = TaskGraph(obj)
return t
def export_task_speclist(self):
tlist_od = [] # task list ordered
for task in self:
tod = OrderedDict([(TaskSpecSchema.task_id, 'idholder'),
(TaskSpecSchema.node_type, 'typeholder'),
(TaskSpecSchema.conf, 'confholder'),
(TaskSpecSchema.inputs, 'inputsholder')
])
tod.update(task._task_spec)
if not isinstance(tod[TaskSpecSchema.node_type], str):
tod[TaskSpecSchema.node_type] = tod[
TaskSpecSchema.node_type].__name__
tlist_od.append(tod)
return tlist_od
def save_taskgraph(self, filename):
"""
Write a list of tasks i.e. taskgraph to a yaml file.
Arguments
-------
filename: str
The filename to write a yaml file to.
"""
if not TaskGraph.__SETUP_YAML_ONCE:
TaskGraph.setup_yaml()
# we want -id to be first in the resulting yaml file.
tlist_od = self.export_task_speclist()
with open(filename, 'w') as fh:
ruamel.yaml.dump(tlist_od, fh, default_flow_style=False)
def viz_graph(self, show_ports=False, pydot_options=None):
"""
Generate the visualization of the graph in JupyterLab
Returns
-----
nx.DiGraph
"""
import networkx as nx
G = nx.DiGraph()
if pydot_options:
G.graph['graph'] = pydot_options
# instantiate objects
for itask in self:
task_inputs = itask[TaskSpecSchema.inputs]
to_task = itask[TaskSpecSchema.task_id]
to_type = itask[TaskSpecSchema.node_type]
if to_task == "":
to_task = OUTPUT_TYPE
for iport_or_tid in task_inputs:
# iport_or_tid: it is either to_port or task id (tid) b/c
# if using ports API task_inputs is a dictionary otherwise
# task_inputs is a list.
taskin_and_oport = task_inputs[iport_or_tid] \
if isinstance(task_inputs, dict) else iport_or_tid
isplit = taskin_and_oport.split('.')
from_task = isplit[0]
from_port = isplit[1] if len(isplit) > 1 else None
if show_ports and from_port is not None:
to_port = iport_or_tid
common_tip = taskin_and_oport
G.add_edge(from_task, common_tip, label=from_port)
G.add_edge(common_tip, to_task, label=to_port)
tnode = G.nodes[common_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
else:
G.add_edge(from_task, to_task)
# draw output ports
if show_ports:
if (to_type == OUTPUT_TYPE):
continue
task_node = get_node_obj(itask, tgraph_mixin=True)
# task_outputs = itask.get(TaskSpecSchema.outputs, [])
for pout in task_node._get_output_ports():
out_tip = '{}.{}'.format(
itask[TaskSpecSchema.task_id], pout)
G.add_edge(to_task, out_tip, label=pout)
tnode = G.nodes[out_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
return G
def _build(self, replace=None, profile=False):
"""
Compute the graph structure of the nodes. It will set the input and
output nodes for each node.
Arguments
-------
replace: dict
conf parameters replacement
"""
self.__node_dict.clear()
replace = dict() if replace is None else replace
# check if there are items in `replace` that are not in the graph
task_ids = set([task[TaskSpecSchema.task_id] for task in self])
for rkey in replace.keys():
if rkey not in task_ids:
warnings.warn(
'Replace task-id {} not found in task-graph'.format(rkey),
RuntimeWarning)
# instantiate node objects
for task in self:
task_id = task[TaskSpecSchema.task_id]
nodetype = task[TaskSpecSchema.node_type]
nodetype = nodetype if isinstance(nodetype, str) else \
nodetype.__name__
if nodetype == OUTPUT_TYPE:
output_task = task
node = get_node_obj(output_task, tgraph_mixin=True)
else:
node = get_node_obj(task, replace.get(task_id), profile,
tgraph_mixin=True)
self.__node_dict[task_id] = node
# build the graph
for task_id in self.__node_dict:
node = self.__node_dict[task_id]
task_inputs = node._task_obj[TaskSpecSchema.inputs]
for iport in task_inputs:
# node_inputs should be a dict with entries:
# {iport: taskid.oport}
input_task = task_inputs[iport].split('.')
dst_port = iport
input_id = input_task[0]
# src_port = input_task[1] if len(input_task) > 1 else None
src_port = input_task[1]
try:
input_node = self.__node_dict[input_id]
except KeyError:
raise LookupError(
'Missing task "{}". Add task spec to TaskGraph.'
.format(input_id))
node.inputs.append({
'from_node': input_node,
'from_port': src_port,
'to_port': dst_port
})
# input_node.outputs.append(node)
input_node.outputs.append({
'to_node': node,
'to_port': dst_port,
'from_port': src_port
|
inode = node_in['from_node']
self.__find_roots(inode, inputs, consider_load)
|
conditional_block
|
taskGraph.py
|
def __len__(self):
return len(self.__task_list)
def __iter__(self):
self.__index = 0
self.__tlist = list(self.__task_list.values())
return self
def __next__(self):
idx = self.__index
if idx is None or idx == len(self.__tlist):
self.__index = None
raise StopIteration
task = self.__tlist[idx]
self.__index = idx + 1
return task
def __getitem__(self, key):
# FIXME: This is inconsistent. Above for __contains__, __iter__, and
# __next__, the returned object is a Task instance. Here however
# the returned object is a Node instance.
if not self.__node_dict:
warnings.warn(
'Task graph internal state empty. Did you build the task '
'graph? Run ".build()"',
RuntimeWarning)
elif key not in self.__node_dict:
warnings.warn(
'Task graph missing task id "{}". Check the spelling of the '
'task id.'.format(key),
RuntimeWarning)
return self.__node_dict[key]
def __find_roots(self, node, inputs, consider_load=True):
"""
Find the root nodes that `node` depends on.
Arguments
-------
node: Node
the leaf node whose dependent root (input) nodes need to be found
inputs: list
resulting list used to collect all the root nodes
consider_load: bool
whether nodes that load from a cache file should be treated as roots (traversal stops at them)
Returns
-----
None
"""
if (node.visited):
return
node.visited = True
if len(node.inputs) == 0:
inputs.append(node)
return
if consider_load and node.load:
inputs.append(node)
return
for node_in in node.inputs:
inode = node_in['from_node']
self.__find_roots(inode, inputs, consider_load)
def start_labwidget(self):
from IPython.display import display
display(self.draw())
@staticmethod
def register_lab_node(module_name, class_obj):
"""
Register the node class for Greenflowlab. It puts the class_obj
into sys.modules under `module_name`. It registers the node
class in the JupyterLab kernel space, communicates with the
client to populate the add-nodes menus, and syncs with the
JupyterLab server space to register the node class.
The latest registered `class_obj` overwrites the old one.
Arguments
-------
module_name: str
the module name for `class_obj`. It will also be the menu name for
the node. Note: if '.' is used inside `module_name`, the client
will automatically construct hierarchical menus based on '.'
class_obj: Node
The node class that is the subclass of greenflow 'Node'. It is
usually defined dynamically so it can be registered.
Returns
-----
None
"""
global server_task_graph
if server_task_graph is None:
server_task_graph = TaskGraph()
server_task_graph.start_labwidget()
server_task_graph.register_node(module_name, class_obj)
@staticmethod
def load_taskgraph(filename):
"""
Load the yaml file into a TaskGraph object
Arguments
-------
filename: str
the filename pointing to the yaml file in the filesystem
Returns
-----
object
the TaskGraph instance
"""
with open(filename) as f:
yaml = ruamel.yaml.YAML(typ='safe')
yaml.constructor.yaml_constructors[
u'tag:yaml.org,2002:timestamp'] = \
yaml.constructor.yaml_constructors[u'tag:yaml.org,2002:str']
obj = yaml.load(f)
t = TaskGraph(obj)
return t
def export_task_speclist(self):
tlist_od = [] # task list ordered
for task in self:
tod = OrderedDict([(TaskSpecSchema.task_id, 'idholder'),
(TaskSpecSchema.node_type, 'typeholder'),
(TaskSpecSchema.conf, 'confholder'),
(TaskSpecSchema.inputs, 'inputsholder')
])
tod.update(task._task_spec)
if not isinstance(tod[TaskSpecSchema.node_type], str):
tod[TaskSpecSchema.node_type] = tod[
TaskSpecSchema.node_type].__name__
tlist_od.append(tod)
return tlist_od
def save_taskgraph(self, filename):
"""
Write a list of tasks i.e. taskgraph to a yaml file.
Arguments
-------
filename: str
The filename to write a yaml file to.
"""
if not TaskGraph.__SETUP_YAML_ONCE:
TaskGraph.setup_yaml()
# we want -id to be first in the resulting yaml file.
tlist_od = self.export_task_speclist()
with open(filename, 'w') as fh:
ruamel.yaml.dump(tlist_od, fh, default_flow_style=False)
def viz_graph(self, show_ports=False, pydot_options=None):
"""
Generate the visualization of the graph in JupyterLab
Returns
-----
nx.DiGraph
"""
import networkx as nx
G = nx.DiGraph()
if pydot_options:
G.graph['graph'] = pydot_options
# instantiate objects
for itask in self:
task_inputs = itask[TaskSpecSchema.inputs]
to_task = itask[TaskSpecSchema.task_id]
to_type = itask[TaskSpecSchema.node_type]
if to_task == "":
to_task = OUTPUT_TYPE
for iport_or_tid in task_inputs:
# iport_or_tid: it is either to_port or task id (tid) b/c
# if using ports API task_inputs is a dictionary otherwise
# task_inputs is a list.
taskin_and_oport = task_inputs[iport_or_tid] \
if isinstance(task_inputs, dict) else iport_or_tid
isplit = taskin_and_oport.split('.')
from_task = isplit[0]
from_port = isplit[1] if len(isplit) > 1 else None
if show_ports and from_port is not None:
to_port = iport_or_tid
common_tip = taskin_and_oport
G.add_edge(from_task, common_tip, label=from_port)
G.add_edge(common_tip, to_task, label=to_port)
tnode = G.nodes[common_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
else:
G.add_edge(from_task, to_task)
# draw output ports
if show_ports:
if (to_type == OUTPUT_TYPE):
continue
task_node = get_node_obj(itask, tgraph_mixin=True)
# task_outputs = itask.get(TaskSpecSchema.outputs, [])
for pout in task_node._get_output_ports():
out_tip = '{}.{}'.format(
itask[TaskSpecSchema.task_id], pout)
G.add_edge(to_task, out_tip, label=pout)
tnode = G.nodes[out_tip]
tnode.update({
# 'label': '',
'shape': 'point'})
return G
def _build(self, replace=None, profile=False):
"""
Compute the graph structure of the nodes. It will set the input and
output nodes for each node.
Arguments
-------
replace: dict
conf parameters replacement
"""
self.__node_dict.clear()
replace = dict() if replace is None else replace
# check if there are items in `replace` that are not in the graph
task_ids = set([task[TaskSpecSchema.task_id] for task in self])
for rkey in replace.keys():
if rkey not in task_ids:
warnings.warn(
'Replace task-id {} not found in task-graph'.format(rkey),
RuntimeWarning)
# instantiate node objects
for task in self:
task_id = task[TaskSpecSchema.task_id]
nodetype = task[TaskSpecSchema.node_type]
nodetype = nodetype if isinstance(nodetype, str) else \
nodetype.__name__
if nodetype == OUTPUT_TYPE:
output_task = task
node = get_node_obj(output_task, tgraph_mixin=True)
else:
node = get_node_obj(task, replace.get(task_id), profile,
tgraph_mixin=True)
self.__node_dict[task_id] = node
# build the graph
for task_id in self.__node_dict:
node = self.__node_dict[task_id]
task_inputs = node._task_obj[TaskSpecSchema.inputs]
for iport in task_inputs:
# node_inputs should be a dict with entries:
# {iport: taskid.oport}
input_task = task_inputs[iport].split('.')
dst_port = iport
input_id = input_task[0]
# src_port = input_task[1] if len(input_task) > 1 else None
src_port = input_task[1]
try:
input_node = self.__node_dict[input_id]
except KeyError:
raise LookupError(
'Missing task "{}
|
return True if task_id in self.__task_list else False
|
identifier_body
|
|
1.cifar10_classification_lightmodel.py
|
, and
# Geoffrey Hinton. Classes are: airplane, automobile, bird, cat, deer,
# dog, frog, horse, ship, truck
from keras.datasets import cifar10
# 1.3 Basic classes for specifying and training a neural network
# Keras has two types of models Sequential and Model Class for complex models
# Sequential models are essentially layer-by-layer. Model Class models may
# also have branching.
from keras.models import Sequential
# 1.3.1 Import layers that will be used in modeling
from keras.layers import Convolution2D, MaxPooling2D, Dropout, Flatten, Dense
# 1.4 Keras utilities for one-hot encoding of ground truth values
from keras.utils import np_utils
# 1.5 Import keras optimizers unless you want default parameters
# from keras.optimizers import Adam
# 1.6
import os, time
import matplotlib.pyplot as plt
# %matplotlib inline
#%% B. Define needed constants
# 2.0 Set some hyperparameters
# 2.1
batch_size = 16 # in each iteration, we consider 16 training examples at once
# 2.1
num_epochs = 5 # we iterate 5 times over the entire training set
# 2.3
kernel_size = 3 # we will use 3x3 kernels throughout
# 2.4
pool_size = 2 # we will use 2x2 pooling throughout
# 2.5
conv_depth_1 = 32 # we will initially have 32 filters per conv. layer...
# Remember each filter filters out some structure from image data
# 2.6
conv_depth_2 = 64 # ...switching to 64 filters after the first pooling layer
# 2.7
drop_prob_1 = 0.25 # dropout after pooling with probability 0.25
# 2.8
drop_prob_2 = 0.5 # dropout in the FC layer with probability 0.5
# 2.9
hidden_size = 512 # the FC layer will have 512 neurons
#%% C. Fetch cifar10 images & transform
"""
About CIFAR-10 images
Ref: https://en.wikipedia.org/wiki/CIFAR-10
The CIFAR-10 dataset (Canadian Institute For Advanced Research) is a
collection of images that are commonly used to train machine learning
and computer vision algorithms. It is one of the most widely used datasets
for machine learning research. The CIFAR-10 dataset contains 60,000 32x32
color images in 10 different classes. The 10 different classes represent
airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
There are 6,000 images of each class. (Alex Krizhevsky)
"""
# 3. Download, unzip and divide into training/test data cifar10 images
# By default download occurs at C:\Users\ashokharnal\.keras\datasets\
# Or at /home/ashok/.keras/datasets ; Downloaded file: cifar-10-batches-py.tar.gz.
# Expanded in folder: cifar-10-batches-py
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train.shape # 50000 images, 32 X 32 pixels, 3-channels
y_train.shape # (50000, 1)
# 3.1 Have a look at the data
X_train[:1,...] # np.random.normal(size = (2,3,4))
y_train[:10,:]
# 4. There are 50000 training examples in CIFAR-10
num_train, height, width, depth = X_train.shape
# 4.1 There are 10000 test examples in CIFAR-10
num_test = X_test.shape[0]
num_test
# 4.2 There are 10 image classes
num_classes = np.unique(y_train).shape[0]
num_classes
# 4.2.1 Class names are in alphabetical sequence
class_names = ["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]
# 4.2 There are 10 image classes
num_classes = np.unique(y_train).shape[0]
class_names = ["airplane","automobile","bird",
"cat","deer","dog","frog","horse",
"ship","truck"]
# 5. See an image
i = 1
im1 = X_train[i] # Get the ith image array
# 5.1 To which class does it belong
y_train[i]
k = class_names[y_train[i][0]] # Get 0th element of y_train[1]
print(k)
# 5.2 Plot the image
fig = plt.figure(figsize=(4,2))
plt.imshow(im1) # imshow() is a matplotlib method
plt.show()
# 5. Change array types and normalise
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= np.max(X_train) # Normalise data to [0, 1] range
X_test /= np.max(X_test) # It is a global rather than column-wise (axis =0)
# normalization
# 5.1 This is columnwise normalization
a = np.array([[2,3,4], [50,100,200]])
a = a/np.max(a, axis = 0)
a
# 6. One-hot encode the labels
# For all classification problems, even when num_classes is 2
# use OHE with softmax layer. See VGG16 problem for 2-classes.
# For two classes, you can also use one 'sigmoid' at the output
# without earlier performing OHE. See VGG16 problem for 2-classes.
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_train[:5, :]
y_train[:5]
Y_test = np_utils.to_categorical(y_test, num_classes)
Y_test[:3,:4]
#%% D. Model building
# 7. Conv [32] -> Conv [32] -> Pool (with dropout on the pooling layer)
# Same padding means the size of output feature-maps are the same as
# the input feature-maps (under the assumption of stride=1)
# padding= "SAME" tries to pad evenly left and right, but if the amount
# of columns to be added is odd, it will add the extra column to the right
# 7.1
# See keras layers: https://keras.io/layers/about-keras-layers/
model = Sequential()
model.add(Convolution2D(conv_depth_2, # 64 filters (conv_depth_2 = 64)
(kernel_size, kernel_size), # 3 X 3
padding='same', # Do zero padding
activation='relu',
input_shape=(height, width, depth) # 32 X 32 X 3
)
)
#7.1.1
model.add(Convolution2D(conv_depth_1,
(kernel_size, kernel_size),
padding='valid',
activation='relu')
)
# 7.1.2
model.add(MaxPooling2D(
pool_size=(pool_size, pool_size)) # 2 X 2
)
# 7.1.3
# https://keras.io/layers/core/#dropout
model.add(Dropout(
drop_prob_1 # 0.25
)
)
# 7.2 Now flatten to 1D, apply FC -> ReLU (with dropout) -> softmax
# Fully connected layer
# https://keras.io/layers/core/#flatten
model.add(Flatten())
# 7.2.1 Output of this dense layer: Ist hidden layer
# https://keras.io/layers/core/#dense
"""
Dense implements the operation:
output = activation(dot(input, kernel) + bias)
where activation is the element-wise activation function
passed as the activation argument, kernel is a weights matrix
created by the layer, and bias is a bias vector created by the
layer (only applicable if use_bias is True).
Size of output has to be specified
"""
model.add(Dense(hidden_size, activation='relu')) # output size = hidden_size
model.add( Dropout
(
drop_prob_2 # 0.5
)
)
|
# exp(xi)/Sigma(exp(xk))
"""
Softmax
If we take an input of [1, 2, 3, 4, 1, 2, 3], the softmax of that
is [0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0
|
# 7.2.2 Final output layer; softmax
# About softmax: https://en.wikipedia.org/wiki/Softmax_function
|
random_line_split
|
1.cifar10_classification_lightmodel.py
|
will have 512 neurons
#%% C. Fetch cifar10 images & transform
"""
About CIFAR-10 images
Ref: https://en.wikipedia.org/wiki/CIFAR-10
The CIFAR-10 dataset (Canadian Institute For Advanced Research) is a
collection of images that are commonly used to train machine learning
and computer vision algorithms. It is one of the most widely used datasets
for machine learning research. The CIFAR-10 dataset contains 60,000 32x32
color images in 10 different classes. The 10 different classes represent
airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
There are 6,000 images of each class. (Alex Krizhevsky)
"""
# 3. Download, unzip and divide into training/test data cifar10 images
# By default download occurs at C:\Users\ashokharnal\.keras\datasets\
# Or at /home/ashok/.keras/datasets ; Downloaded file: cifar-10-batches-py.tar.gz.
# Expanded in folder: cifar-10-batches-py
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train.shape # 50000 images, 32 X 32 pixels, 3-channels
y_train.shape # (50000, 1)
# 3.1 Have a look at the data
X_train[:1,...] # np.random.normal(size = (2,3,4))
y_train[:10,:]
# 4. There are 50000 training examples in CIFAR-10
num_train, height, width, depth = X_train.shape
# 4.1 There are 10000 test examples in CIFAR-10
num_test = X_test.shape[0]
num_test
# 4.2 There are 10 image classes
num_classes = np.unique(y_train).shape[0]
num_classes
# 4.2.1 Class names are in alphabetical sequence
class_names = ["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]
# 4.2 There are 10 image classes
num_classes = np.unique(y_train).shape[0]
class_names = ["airplane","automobile","bird",
"cat","deer","dog","frog","horse",
"ship","truck"]
# 5. See an image
i = 1
im1 = X_train[i] # Get the ith image array
# 5.1 To which class does it belong
y_train[i]
k = class_names[y_train[i][0]] # Get 0th element of y_train[1]
print(k)
# 5.2 Plot the image
fig = plt.figure(figsize=(4,2))
plt.imshow(im1) # imshow() is a matplotlib method
plt.show()
# 5. Change array types and normalise
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= np.max(X_train) # Normalise data to [0, 1] range
X_test /= np.max(X_test) # It is a global rather than column-wise (axis =0)
# normalization
# 5.1 This is columnwise normalization
a = np.array([[2,3,4], [50,100,200]])
a = a/np.max(a, axis = 0)
a
# 6. One-hot encode the labels
# For all classification problems, even when num_classes is 2
# use OHE with softmax layer. See VGG16 problem for 2-classes.
# For two classes, you can also use one 'sigmoid' at the output
# without earlier performing OHE. See VGG16 problem for 2-classes.
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_train[:5, :]
y_train[:5]
Y_test = np_utils.to_categorical(y_test, num_classes)
Y_test[:3,:4]
#%% D. Model building
# 7. Conv [32] -> Conv [32] -> Pool (with dropout on the pooling layer)
# Same padding means the size of output feature-maps are the same as
# the input feature-maps (under the assumption of stride=1)
# padding= "SAME" tries to pad evenly left and right, but if the amount
# of columns to be added is odd, it will add the extra column to the right
# 7.1
# See keras layers: https://keras.io/layers/about-keras-layers/
model = Sequential()
model.add(Convolution2D(conv_depth_2, # 64 filters (conv_depth_2 = 64)
(kernel_size, kernel_size), # 3 X 3
padding='same', # Do zero padding
activation='relu',
input_shape=(height, width, depth) # 32 X 32 X 3
)
)
#7.1.1
model.add(Convolution2D(conv_depth_1,
(kernel_size, kernel_size),
padding='valid',
activation='relu')
)
# 7.1.2
model.add(MaxPooling2D(
pool_size=(pool_size, pool_size)) # 2 X 2
)
# 7.1.3
# https://keras.io/layers/core/#dropout
model.add(Dropout(
drop_prob_1 # 0.25
)
)
# 7.2 Now flatten to 1D, apply FC -> ReLU (with dropout) -> softmax
# Fully connected layer
# https://keras.io/layers/core/#flatten
model.add(Flatten())
# 7.2.1 Output of this dense layer: Ist hidden layer
# https://keras.io/layers/core/#dense
"""
Dense implements the operation:
output = activation(dot(input, kernel) + bias)
where activation is the element-wise activation function
passed as the activation argument, kernel is a weights matrix
created by the layer, and bias is a bias vector created by the
layer (only applicable if use_bias is True).
Size of output has to be specified
"""
model.add(Dense(hidden_size, activation='relu')) # output size = hidden_size
model.add( Dropout
(
drop_prob_2 # 0.5
)
)
# 7.2.2 Final output layer; softmax
# About softmax: https://en.wikipedia.org/wiki/Softmax_function
# exp(xi)/Sigma(exp(xk))
"""
Softmax
If we take an input of [1, 2, 3, 4, 1, 2, 3], the softmax of that
is [0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175]. The output
has most of its weight where the '4' was in the original input.
This is what the function is normally used for: to highlight the
largest values and suppress values which are significantly below
the maximum value.
See calculations at the end of this code.
"""
model.add(
Dense(num_classes,
activation='softmax'
)
)
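# Hedged sketch (illustrative, not part of the original script; assumes numpy is
# imported as np earlier in this file): reproduce the softmax values quoted above.
_z = np.array([1., 2., 3., 4., 1., 2., 3.])
_softmax = np.exp(_z) / np.sum(np.exp(_z))
print(np.round(_softmax, 3))  # -> [0.024 0.064 0.175 0.475 0.024 0.064 0.175]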
# 7.3 Compile model and add necessary parameters
# Cross entropy: http://203.122.28.230/moodle/mod/url/view.php?id=1409
#
model.compile(loss='categorical_crossentropy', # using the cross-entropy loss function
optimizer='adam', # using the Adam optimiser
# Ref: https://keras.io/optimizers/
# keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
metrics=['accuracy']) # reporting the accuracy
#%% E. Model training and evaluation
# 8. ...holding out 10% of the data for validation. 26 minutes
# To save time consider just 20000 samples for training
# 10% of these, ie 2000 are for validation
# 8.1
X_train.shape
Y_train.shape
# 8.2 Takes 20 minutes
# Specify fit/train hyperparameters
start = time.time()
history = model.fit(X_train[:20000], # Train model using limited training data
Y_train[:20000],
batch_size=batch_size,
epochs=num_epochs,
verbose=1,
validation_split=0.1
)
end = time.time()
print ((end - start)/60)
# 8.3 Certain parameters are associated
# with 'history' object
history.epoch
history.params
# 8.4 How accuracy changes as epochs increase
# We will use this function again and again
# in subsequent examples
def
|
plot_history
|
identifier_name
|
|
1.cifar10_classification_lightmodel.py
|
datasets
for machine learning research. The CIFAR-10 dataset contains 60,000 32x32
color images in 10 different classes. The 10 different classes represent
airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks.
There are 6,000 images of each class. (Alex Krizhevsky)
"""
# 3. Download, unzip and divide into training/test data cifar10 images
# By default download occurs at C:\Users\ashokharnal\.keras\datasets\
# Or at /home/ashok/.keras/datasets ; Downloaded file: cifar-10-batches-py.tar.gz.
# Expanded in folder: cifar-10-batches-py
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train.shape # 50000 images, 32 X 32 pixels, 3-channels
y_train.shape # (50000, 1)
# 3.1 Have a look at the data
X_train[:1,...] # np.random.normal(size = (2,3,4))
y_train[:10,:]
# 4. There are 50000 training examples in CIFAR-10
num_train, height, width, depth = X_train.shape
# 4.1 There are 10000 test examples in CIFAR-10
num_test = X_test.shape[0]
num_test
# 4.2 There are 10 image classes
num_classes = np.unique(y_train).shape[0]
num_classes
# 4.2.1 Class names are in alphabetical sequence
class_names = ["airplane","automobile","bird","cat","deer","dog","frog","horse","ship","truck"]
# 4.2 There are 10 image classes
num_classes = np.unique(y_train).shape[0]
class_names = ["airplane","automobile","bird",
"cat","deer","dog","frog","horse",
"ship","truck"]
# 5. See an image
i = 1
im1 = X_train[i] # Get the ith image array
# 5.1 To which class does it belong
y_train[i]
k = class_names[y_train[i][0]] # Get 0th element of y_train[1]
print(k)
# 5.2 Plot the image
fig = plt.figure(figsize=(4,2))
plt.imshow(im1) # imshow() is a matplotlib method
plt.show()
# 5. Change array types and normalise
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= np.max(X_train) # Normalise data to [0, 1] range
X_test /= np.max(X_test) # It is a global rather than column-wise (axis =0)
# normalization
# 5.1 This is columnwise normalization
a = np.array([[2,3,4], [50,100,200]])
a = a/np.max(a, axis = 0)
a
# 6. One-hot encode the labels
# For all classification problems, even when num_classes is 2
# use OHE with softmax layer. See VGG16 problem for 2-classes.
# For two classes, you can also use one 'sigmoid' at the output
# without earlier performing OHE. See VGG16 problem for 2-classes.
Y_train = np_utils.to_categorical(y_train, num_classes)
Y_train[:5, :]
y_train[:5]
Y_test = np_utils.to_categorical(y_test, num_classes)
Y_test[:3,:4]
#%% D. Model building
# 7. Conv [32] -> Conv [32] -> Pool (with dropout on the pooling layer)
# Same padding means the size of output feature-maps are the same as
# the input feature-maps (under the assumption of stride=1)
# padding= "SAME" tries to pad evenly left and right, but if the amount
# of columns to be added is odd, it will add the extra column to the right
# 7.1
# See keras layers: https://keras.io/layers/about-keras-layers/
model = Sequential()
model.add(Convolution2D(conv_depth_2, # 64 filters (conv_depth_2 = 64)
(kernel_size, kernel_size), # 3 X 3
padding='same', # Do zero padding
activation='relu',
input_shape=(height, width, depth) # 32 X 32 X 3
)
)
#7.1.1
model.add(Convolution2D(conv_depth_1,
(kernel_size, kernel_size),
padding='valid',
activation='relu')
)
# 7.1.2
model.add(MaxPooling2D(
pool_size=(pool_size, pool_size)) # 2 X 2
)
# 7.1.3
# https://keras.io/layers/core/#dropout
model.add(Dropout(
drop_prob_1 # 0.25
)
)
# 7.2 Now flatten to 1D, apply FC -> ReLU (with dropout) -> softmax
# Fully connected layer
# https://keras.io/layers/core/#flatten
model.add(Flatten())
# 7.2.1 Output of this dense layer: Ist hidden layer
# https://keras.io/layers/core/#dense
"""
Dense implements the operation:
output = activation(dot(input, kernel) + bias)
where activation is the element-wise activation function
passed as the activation argument, kernel is a weights matrix
created by the layer, and bias is a bias vector created by the
layer (only applicable if use_bias is True).
Size of output has to be specified
"""
model.add(Dense(hidden_size, activation='relu')) # output size = hidden_size
model.add( Dropout
(
drop_prob_2 # 0.5
)
)
# 7.2.2 Final output layer; softmax
# About softmax: https://en.wikipedia.org/wiki/Softmax_function
# softmax(x_i) = exp(x_i) / sum_k exp(x_k)
"""
Softmax
If we take an input of [1, 2, 3, 4, 1, 2, 3], the softmax of that
is [0.024, 0.064, 0.175, 0.475, 0.024, 0.064, 0.175]. The output
has most of its weight where the '4' was in the original input.
This is what the function is normally used for: to highlight the
largest values and suppress values which are significantly below
the maximum value.
See the quick numerical check just below this docstring.
"""
model.add(
Dense(num_classes,
activation='softmax'
)
)
# 7.3 Compile model and add necessary parameters
# Cross entropy: http://203.122.28.230/moodle/mod/url/view.php?id=1409
#
model.compile(loss='categorical_crossentropy', # using the cross-entropy loss function
optimizer='adam', # using the Adam optimiser
# Ref: https://keras.io/optimizers/
# keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
metrics=['accuracy']) # reporting the accuracy
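# (Illustration, not from the original notes) For a one-hot label y and predicted
# probabilities p, categorical cross-entropy is -sum(y * log(p)), i.e. minus the
# log of the probability assigned to the true class:
#   p = np.array([0.1, 0.7, 0.2])   # softmax output for one sample
#   y = np.array([0., 1., 0.])      # one-hot label
#   -np.sum(y * np.log(p))          # = -log(0.7) ~= 0.357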
#%% E. Model training and evaluation
# 8. Fit the model, holding out 10% of the data for validation (~26 minutes on the full data)
# To save time consider just 20000 samples for training
# 10% of these, ie 2000 are for validation
# 8.1
X_train.shape
Y_train.shape
# 8.2 Takes 20 minutes
# Specify fit/train hyperparameters
start = time.time()
history = model.fit(X_train[:20000], # Train model using limited training data
Y_train[:20000],
batch_size=batch_size,
epochs=num_epochs,
verbose=1,
validation_split=0.1
)
end = time.time()
print ((end - start)/60)
# 8.3 Certain parameters are associated
# with 'history' object
history.epoch
history.params
# 8.4 How accuracy changes as epochs increase
# We will use this function agai and again
# in subsequent examples
def plot_history():
|
val_acc = history.history['val_acc']
tr_acc=history.history['acc']
epochs = range(1, len(val_acc) +1)
plt.plot(epochs,val_acc, 'b', label = "Validation accu")
plt.plot(epochs, tr_acc, 'r', label = "Training accu")
plt.title("Training and validation accuracy")
plt.legend()
plt.show()
|
identifier_body
|
|
plugin.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Maya DCC plugin specific implementation
"""
from __future__ import print_function, division, absolute_import
import os
import json
import logging
import maya.cmds as cmds
import maya.mel as mel
import artella
from artella import dcc
from artella.core.dcc import callback
from artella.core import consts, callbacks, plugins, dccplugin, utils
from artella.dccs.maya import utils as maya_utils
logger = logging.getLogger('artella')
class ArtellaMayaPlugin(dccplugin.BaseArtellaDccPlugin):
def __init__(self, artella_drive_client):
super(ArtellaMayaPlugin, self).__init__(artella_drive_client=artella_drive_client)
self._references_found = list()
def get_version(self, force_update=False):
"""
Returns current DCC plugin version
:param bool force_update: Whether or not to force the update of the current Artella DCC plugin version
:return: Version in string format (MAJOR.MINOR.PATCH) of the current Artella DCC plugin
:rtype: str or None
"""
plugin_version = super(ArtellaMayaPlugin, self).get_version(force_update=force_update)
if not plugin_version or force_update:
version_var = self.get_version_variable_name()
artella_path = artella.__path__[0]
version_file_path = os.path.join(os.path.dirname(artella_path), 'plugin-version.json')
if os.path.isfile(version_file_path):
try:
with open(version_file_path) as fh:
version_data = json.load(fh)
version_found = version_data.get('version', None)
if version_found:
os.environ[version_var] = str(version_found)
except Exception as exc:
logger.error('Impossible to retrieve Artella {} Plugin version data: {}!'.format(dcc.name(), exc))
plugin_version = os.environ.get(version_var, None)
return plugin_version
def init(self, dev=False, show_dialogs=True, create_menu=True, create_callbacks=True, *args, **kwargs):
"""
Initializes Artella DCC plugin
:param bool dev: Whether plugin is initialized in development mode or not
:param bool show_dialogs: Whether dialogs should appear during plugin initialization or not
:param bool create_menu: Whether menu should be created or not
:param bool create_callbacks: Whether or not DCC callbacks should be created
:return: True if the initialization was successful; False otherwise.
:rtype: bool
"""
# Force Maya MEL stack trace on before we start using the plugin
maya_utils.force_mel_stack_trace_on()
super(ArtellaMayaPlugin, self).init(
dev=dev, show_dialogs=show_dialogs, create_menu=create_menu, create_callbacks=create_callbacks,
*args, **kwargs)
def setup_callbacks(self):
"""
Setup DCC Artella callbacks
:return:
"""
super(ArtellaMayaPlugin, self).setup_callbacks()
callbacks.register(callback.Callbacks().AfterOpenCallback, self._after_open)
callbacks.register(callback.Callbacks().SceneBeforeSaveCallback, self._before_save)
callbacks.register(callback.Callbacks().BeforeOpenCheckCallback, self._before_open_check)
callbacks.register(callback.Callbacks().AfterLoadReferenceCallback, self._after_load_reference)
callbacks.register(callback.Callbacks().BeforeCreateReferenceCheckCallback, self._before_reference_check)
def _post_update_paths(self, **kwargs):
"""
Internal function that is called after update paths functionality is over.
"""
files_updated = kwargs.get('files_updated', list())
if not files_updated:
return
maya_utils.reload_textures(files_updated)
# Dependencies are already reloaded during update paths process
# maya_utils.reload_dependencies(files_updated)
# ==============================================================================================================
# FUNCTIONS
# ==============================================================================================================
def setup_project(self, artella_local_root_path):
"""
Setup Artella local root as current DCC active project
This function should be overridden in the specific DCC plugin implementation
It is not an abstract function because its implementation is not mandatory
:param str artella_local_root_path: current user Artella local root path
"""
if not artella_local_root_path:
|
# This can happen when local root path cannot be retrieved from Artella Drive
if isinstance(artella_local_root_path, dict):
return
artella_local_root_path = utils.clean_path(artella_local_root_path)
if utils.is_python2():
artella_local_root_path = artella_local_root_path.decode('utf-8')
artella_local_root_path = cmds.encodeString(artella_local_root_path)
mel.eval('setProject "%s"' % artella_local_root_path.replace('\\', '/'))
cmds.workspace(directory=artella_local_root_path)
cmds.workspace(fileRule=['sourceImages', ''])
cmds.workspace(fileRule=['scene', ''])
cmds.workspace(fileRule=['mayaAscii', ''])
cmds.workspace(fileRule=['mayaBinary', ''])
if utils.is_python2():
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path.encode('utf-8')))
else:
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path))
def validate_environment_for_callback(self, callback_name):
"""
Checks that all necessary parts are available before executing a Maya callback
:param str callback_name: name of the callback to validate
"""
logger.info('validate_environment_for_callback for {}'.format(callback_name))
client = self.get_client()
if client:
local_root = cmds.encodeString(client.get_local_root())
if local_root:
# We use this to make sure that Artella environment variable is set
logger.debug('set local root in local environment: {}'.format(local_root))
os.environ[consts.ALR] = local_root
os.putenv(consts.ALR, local_root)
mel.eval('putenv "{}" "{}"'.format(consts.ALR, local_root))
if consts.ALR not in os.environ:
msg = 'Unable to execute Maya "{}" callback, {} is not set in the environment'.format(
callback_name, consts.ALR)
logger.error(msg)
raise Exception(msg)
# ==============================================================================================================
# CALLBACKS
# ==============================================================================================================
def _after_open(self, *args):
"""
Internal callback function that is called once a Maya scene is opened
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterOpen')
def _before_save(self, *args):
"""
Internal callback function that is called before saving a Maya scene
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('BeforeSave')
self.update_paths(show_dialogs=False, skip_save=True)
def _before_open_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya scene is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
file_path = utils.clean_path(maya_file.resolvedFullName())
if self.is_artella_path(file_path):
logger.info('Opening file: "{}"'.format(file_path))
self.validate_environment_for_callback('BeforeOpenCheck')
logger.info('Checking missing dependencies ...')
get_deps_plugin = plugins.get_plugin_by_id('artella-plugins-getdependencies')
if not get_deps_plugin or not get_deps_plugin.is_loaded():
msg = 'Get Dependencies plugin is not loaded. Get dependencies functionality is not available!'
dcc.show_warning('Get Dependencies Plugin not available', msg)
logger.warning(msg)
else:
get_deps_plugin.get_non_available_dependencies(file_path)
return True
def _after_load_reference(self, *args):
"""
Internal callback function that is called after a Maya reference is loaded
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterLoadReference')
def _before_reference_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya reference is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
if self.is_artella_path():
self.validate_environment_for_callback('BeforeReferenceCheck')
raw_full_name = maya_file.rawFullName()
if not dccplugin.DccPlugin().is_path_translated(
raw_full_name) and dccplugin.DccPlugin().is_artella_path(raw_full_name):
convert_path = dccplugin.DccPlugin().convert_path(raw_full_name)
maya_file.setRawFullName(convert_path)
return True
|
logger.warning('No Project Path to setup. Skipping setup project ...')
return
|
conditional_block
|
plugin.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Maya DCC plugin specific implementation
"""
from __future__ import print_function, division, absolute_import
import os
import json
import logging
import maya.cmds as cmds
import maya.mel as mel
import artella
from artella import dcc
from artella.core.dcc import callback
from artella.core import consts, callbacks, plugins, dccplugin, utils
from artella.dccs.maya import utils as maya_utils
logger = logging.getLogger('artella')
class ArtellaMayaPlugin(dccplugin.BaseArtellaDccPlugin):
|
if os.path.isfile(version_file_path):
try:
with open(version_file_path) as fh:
version_data = json.load(fh)
version_found = version_data.get('version', None)
if version_found:
os.environ[version_var] = str(version_found)
except Exception as exc:
logger.error('Impossible to retrieve Artella {} Plugin version data: {}!'.format(dcc.name(), exc))
plugin_version = os.environ.get(version_var, None)
return plugin_version
def init(self, dev=False, show_dialogs=True, create_menu=True, create_callbacks=True, *args, **kwargs):
"""
Initializes Artella DCC plugin
:param bool dev: Whether plugin is initialized in development mode or not
:param bool show_dialogs: Whether dialogs should appear during plugin initialization or not
:param bool create_menu: Whether menu should be created or not
:param bool create_callbacks: Whether or not DCC callbacks should be created
:return: True if the initialization was successful; False otherwise.
:rtype: bool
"""
# Force Maya MEL stack trace on before we start using the plugin
maya_utils.force_mel_stack_trace_on()
super(ArtellaMayaPlugin, self).init(
dev=dev, show_dialogs=show_dialogs, create_menu=create_menu, create_callbacks=create_callbacks,
*args, **kwargs)
def setup_callbacks(self):
"""
Setup DCC Artella callbacks
:return:
"""
super(ArtellaMayaPlugin, self).setup_callbacks()
callbacks.register(callback.Callbacks().AfterOpenCallback, self._after_open)
callbacks.register(callback.Callbacks().SceneBeforeSaveCallback, self._before_save)
callbacks.register(callback.Callbacks().BeforeOpenCheckCallback, self._before_open_check)
callbacks.register(callback.Callbacks().AfterLoadReferenceCallback, self._after_load_reference)
callbacks.register(callback.Callbacks().BeforeCreateReferenceCheckCallback, self._before_reference_check)
def _post_update_paths(self, **kwargs):
"""
Internal function that is called after update paths functionality is over.
"""
files_updated = kwargs.get('files_updated', list())
if not files_updated:
return
maya_utils.reload_textures(files_updated)
# Dependencies are already reloaded during update paths process
# maya_utils.reload_dependencies(files_updated)
# ==============================================================================================================
# FUNCTIONS
# ==============================================================================================================
def setup_project(self, artella_local_root_path):
"""
Setup Artella local root as current DCC active project
This function should be overridden in the specific DCC plugin implementation
It is not an abstract function because its implementation is not mandatory
:param str artella_local_root_path: current user Artella local root path
"""
if not artella_local_root_path:
logger.warning('No Project Path to setup. Skipping setup project ...')
return
# This can happen when local root path cannot be retrieved from Artella Drive
if isinstance(artella_local_root_path, dict):
return
artella_local_root_path = utils.clean_path(artella_local_root_path)
if utils.is_python2():
artella_local_root_path = artella_local_root_path.decode('utf-8')
artella_local_root_path = cmds.encodeString(artella_local_root_path)
mel.eval('setProject "%s"' % artella_local_root_path.replace('\\', '/'))
cmds.workspace(directory=artella_local_root_path)
cmds.workspace(fileRule=['sourceImages', ''])
cmds.workspace(fileRule=['scene', ''])
cmds.workspace(fileRule=['mayaAscii', ''])
cmds.workspace(fileRule=['mayaBinary', ''])
if utils.is_python2():
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path.encode('utf-8')))
else:
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path))
def validate_environment_for_callback(self, callback_name):
"""
Checks that all necessary parts are available before executing a Maya callback
:param str callback_name: name of the callback to validate
"""
logger.info('validate_environment_for_callback for {}'.format(callback_name))
client = self.get_client()
if client:
local_root = cmds.encodeString(client.get_local_root())
if local_root:
# We use this to make sure that Artella environment variable is set
logger.debug('set local root in local environment: {}'.format(local_root))
os.environ[consts.ALR] = local_root
os.putenv(consts.ALR, local_root)
mel.eval('putenv "{}" "{}"'.format(consts.ALR, local_root))
if consts.ALR not in os.environ:
msg = 'Unable to execute Maya "{}" callback, {} is not set in the environment'.format(
callback_name, consts.ALR)
logger.error(msg)
raise Exception(msg)
# ==============================================================================================================
# CALLBACKS
# ==============================================================================================================
def _after_open(self, *args):
"""
Internal callback function that is called once a Maya scene is opened
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterOpen')
def _before_save(self, *args):
"""
Internal callback function that is called before saving a Maya scene
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('BeforeSave')
self.update_paths(show_dialogs=False, skip_save=True)
def _before_open_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya scene is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
file_path = utils.clean_path(maya_file.resolvedFullName())
if self.is_artella_path(file_path):
logger.info('Opening file: "{}"'.format(file_path))
self.validate_environment_for_callback('BeforeOpenCheck')
logger.info('Checking missing dependencies ...')
get_deps_plugin = plugins.get_plugin_by_id('artella-plugins-getdependencies')
if not get_deps_plugin or not get_deps_plugin.is_loaded():
msg = 'Get Dependencies plugin is not loaded. Get dependencies functionality is not available!'
dcc.show_warning('Get Dependencies Plugin not available', msg)
logger.warning(msg)
else:
get_deps_plugin.get_non_available_dependencies(file_path)
return True
def _after_load_reference(self, *args):
"""
Internal callback function that is called after a Maya reference is loaded
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterLoadReference')
def _before_reference_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya reference is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
if self.is_artella_path():
self.validate_environment_for_callback('BeforeReferenceCheck')
raw_full_name = maya_file.rawFullName()
if not dccplugin.DccPlugin().is_path_translated(
raw_full_name) and dccplugin.DccPlugin().is_artella_path(raw_full_name):
convert_path = dccplugin.DccPlugin().convert_path(raw_full_name)
maya_file.setRawFullName(convert_path)
return True
|
def __init__(self, artella_drive_client):
super(ArtellaMayaPlugin, self).__init__(artella_drive_client=artella_drive_client)
self._references_found = list()
def get_version(self, force_update=False):
"""
Returns current DCC plugin version
:param bool force_update: Whether or not to force the update of the current Artella DCC plugin version
:return: Version in string format (MAJOR.MINOR.PATCH) of the current Artella DCC plugin
:rtype: str or None
"""
plugin_version = super(ArtellaMayaPlugin, self).get_version(force_update=force_update)
if not plugin_version or force_update:
version_var = self.get_version_variable_name()
artella_path = artella.__path__[0]
version_file_path = os.path.join(os.path.dirname(artella_path), 'plugin-version.json')
|
identifier_body
|
plugin.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Maya DCC plugin specific implementation
"""
from __future__ import print_function, division, absolute_import
import os
import json
import logging
import maya.cmds as cmds
import maya.mel as mel
import artella
from artella import dcc
from artella.core.dcc import callback
from artella.core import consts, callbacks, plugins, dccplugin, utils
from artella.dccs.maya import utils as maya_utils
logger = logging.getLogger('artella')
class ArtellaMayaPlugin(dccplugin.BaseArtellaDccPlugin):
def __init__(self, artella_drive_client):
super(ArtellaMayaPlugin, self).__init__(artella_drive_client=artella_drive_client)
self._references_found = list()
def get_version(self, force_update=False):
"""
Returns current DCC plugin version
:param bool force_update: Whether or not to force the update of the current Artella DCC plugin version
:return: Version in string format (MAJOR.MINOR.PATCH) of the current Artella DCC plugin
:rtype: str or None
"""
plugin_version = super(ArtellaMayaPlugin, self).get_version(force_update=force_update)
if not plugin_version or force_update:
version_var = self.get_version_variable_name()
artella_path = artella.__path__[0]
version_file_path = os.path.join(os.path.dirname(artella_path), 'plugin-version.json')
if os.path.isfile(version_file_path):
try:
with open(version_file_path) as fh:
version_data = json.load(fh)
version_found = version_data.get('version', None)
if version_found:
os.environ[version_var] = str(version_found)
except Exception as exc:
logger.error('Impossible to retrieve Artella {} Plugin version data: {}!'.format(dcc.name(), exc))
plugin_version = os.environ.get(version_var, None)
return plugin_version
def init(self, dev=False, show_dialogs=True, create_menu=True, create_callbacks=True, *args, **kwargs):
"""
Initializes Artella DCC plugin
:param bool dev: Whether plugin is initialized in development mode or not
|
:param bool create_callbacks: Whether or not DCC callbacks should be created
:return: True if the initialization was successful; False otherwise.
:rtype: bool
"""
# Force Maya MEL stack trace on before we start using the plugin
maya_utils.force_mel_stack_trace_on()
super(ArtellaMayaPlugin, self).init(
dev=dev, show_dialogs=show_dialogs, create_menu=create_menu, create_callbacks=create_callbacks,
*args, **kwargs)
def setup_callbacks(self):
"""
Setup DCC Artella callbacks
:return:
"""
super(ArtellaMayaPlugin, self).setup_callbacks()
callbacks.register(callback.Callbacks().AfterOpenCallback, self._after_open)
callbacks.register(callback.Callbacks().SceneBeforeSaveCallback, self._before_save)
callbacks.register(callback.Callbacks().BeforeOpenCheckCallback, self._before_open_check)
callbacks.register(callback.Callbacks().AfterLoadReferenceCallback, self._after_load_reference)
callbacks.register(callback.Callbacks().BeforeCreateReferenceCheckCallback, self._before_reference_check)
def _post_update_paths(self, **kwargs):
"""
Internal function that is called after update paths functionality is over.
"""
files_updated = kwargs.get('files_updated', list())
if not files_updated:
return
maya_utils.reload_textures(files_updated)
# Dependencies are already reloaded during update paths process
# maya_utils.reload_dependencies(files_updated)
# ==============================================================================================================
# FUNCTIONS
# ==============================================================================================================
def setup_project(self, artella_local_root_path):
"""
Setup Artella local root as current DCC active project
This function should be overridden in the specific DCC plugin implementation
It is not an abstract function because its implementation is not mandatory
:param str artella_local_root_path: current user Artella local root path
"""
if not artella_local_root_path:
logger.warning('No Project Path to setup. Skipping setup project ...')
return
# This can happen when local root path cannot be retrieved from Artella Drive
if isinstance(artella_local_root_path, dict):
return
artella_local_root_path = utils.clean_path(artella_local_root_path)
if utils.is_python2():
artella_local_root_path = artella_local_root_path.decode('utf-8')
artella_local_root_path = cmds.encodeString(artella_local_root_path)
mel.eval('setProject "%s"' % artella_local_root_path.replace('\\', '/'))
cmds.workspace(directory=artella_local_root_path)
cmds.workspace(fileRule=['sourceImages', ''])
cmds.workspace(fileRule=['scene', ''])
cmds.workspace(fileRule=['mayaAscii', ''])
cmds.workspace(fileRule=['mayaBinary', ''])
if utils.is_python2():
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path.encode('utf-8')))
else:
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path))
def validate_environment_for_callback(self, callback_name):
"""
Checks that all necessary parts are available before executing a Maya callback
:param str callback_name: name of the callback to validate
"""
logger.info('validate_environment_for_callback for {}'.format(callback_name))
client = self.get_client()
if client:
local_root = cmds.encodeString(client.get_local_root())
if local_root:
# We use this to make sure that Artella environment variable is set
logger.debug('set local root in local environment: {}'.format(local_root))
os.environ[consts.ALR] = local_root
os.putenv(consts.ALR, local_root)
mel.eval('putenv "{}" "{}"'.format(consts.ALR, local_root))
if consts.ALR not in os.environ:
msg = 'Unable to execute Maya "{}" callback, {} is not set in the environment'.format(
callback_name, consts.ALR)
logger.error(msg)
raise Exception(msg)
# ==============================================================================================================
# CALLBACKS
# ==============================================================================================================
def _after_open(self, *args):
"""
Internal callback function that is called once a Maya scene is opened
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterOpen')
def _before_save(self, *args):
"""
Internal callback function that is called before saving a Maya scene
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('BeforeSave')
self.update_paths(show_dialogs=False, skip_save=True)
def _before_open_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya scene is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
file_path = utils.clean_path(maya_file.resolvedFullName())
if self.is_artella_path(file_path):
logger.info('Opening file: "{}"'.format(file_path))
self.validate_environment_for_callback('BeforeOpenCheck')
logger.info('Checking missing dependencies ...')
get_deps_plugin = plugins.get_plugin_by_id('artella-plugins-getdependencies')
if not get_deps_plugin or not get_deps_plugin.is_loaded():
msg = 'Get Dependencies plugin is not loaded. Get dependencies functionality is not available!'
dcc.show_warning('Get Dependencies Plugin not available', msg)
logger.warning(msg)
else:
get_deps_plugin.get_non_available_dependencies(file_path)
return True
def _after_load_reference(self, *args):
"""
Internal callback function that is called after a Maya reference is loaded
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterLoadReference')
def _before_reference_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya reference is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
if self.is_artella_path():
self.validate_environment_for_callback('BeforeReferenceCheck')
raw_full_name = maya_file.rawFullName()
if not dccplugin.DccPlugin().is_path_translated(
raw_full_name) and dccplugin.DccPlugin().is_artella_path(raw_full_name):
convert_path = dccplugin.DccPlugin().convert_path(raw_full_name)
maya_file.setRawFullName(convert_path)
return True
|
:param bool show_dialogs: Whether dialogs should appear during plugin initialization or not
:param bool create_menu: Whether menu should be created or not
|
random_line_split
|
plugin.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains Maya DCC plugin specific implementation
"""
from __future__ import print_function, division, absolute_import
import os
import json
import logging
import maya.cmds as cmds
import maya.mel as mel
import artella
from artella import dcc
from artella.core.dcc import callback
from artella.core import consts, callbacks, plugins, dccplugin, utils
from artella.dccs.maya import utils as maya_utils
logger = logging.getLogger('artella')
class ArtellaMayaPlugin(dccplugin.BaseArtellaDccPlugin):
def __init__(self, artella_drive_client):
super(ArtellaMayaPlugin, self).__init__(artella_drive_client=artella_drive_client)
self._references_found = list()
def get_version(self, force_update=False):
"""
Returns current DCC plugin version
:param bool force_update: Whether or not to force the update of the current Artella DCC plugin version
:return: Version in string format (MAJOR.MINOR.PATCH) of the current Artella DCC plugin
:rtype: str or None
"""
plugin_version = super(ArtellaMayaPlugin, self).get_version(force_update=force_update)
if not plugin_version or force_update:
version_var = self.get_version_variable_name()
artella_path = artella.__path__[0]
version_file_path = os.path.join(os.path.dirname(artella_path), 'plugin-version.json')
if os.path.isfile(version_file_path):
try:
with open(version_file_path) as fh:
version_data = json.load(fh)
version_found = version_data.get('version', None)
if version_found:
os.environ[version_var] = str(version_found)
except Exception as exc:
logger.error('Impossible to retrieve Artella {} Plugin version data: {}!'.format(dcc.name(), exc))
plugin_version = os.environ.get(version_var, None)
return plugin_version
def init(self, dev=False, show_dialogs=True, create_menu=True, create_callbacks=True, *args, **kwargs):
"""
Initializes Artella DCC plugin
:param bool dev: Whether plugin is initialized in development mode or not
:param bool show_dialogs: Whether dialogs should appear during plugin initialization or not
:param bool create_menu: Whether menu should be created or not
:param bool create_callbacks: Whether or not DCC callbacks should be created
:return: True if the initialization was successful; False otherwise.
:rtype: bool
"""
# Force Maya MEL stack trace on before we start using the plugin
maya_utils.force_mel_stack_trace_on()
super(ArtellaMayaPlugin, self).init(
dev=dev, show_dialogs=show_dialogs, create_menu=create_menu, create_callbacks=create_callbacks,
*args, **kwargs)
def setup_callbacks(self):
"""
Setup DCC Artella callbacks
:return:
"""
super(ArtellaMayaPlugin, self).setup_callbacks()
callbacks.register(callback.Callbacks().AfterOpenCallback, self._after_open)
callbacks.register(callback.Callbacks().SceneBeforeSaveCallback, self._before_save)
callbacks.register(callback.Callbacks().BeforeOpenCheckCallback, self._before_open_check)
callbacks.register(callback.Callbacks().AfterLoadReferenceCallback, self._after_load_reference)
callbacks.register(callback.Callbacks().BeforeCreateReferenceCheckCallback, self._before_reference_check)
def _post_update_paths(self, **kwargs):
"""
Internal function that is called after update paths functionality is over.
"""
files_updated = kwargs.get('files_updated', list())
if not files_updated:
return
maya_utils.reload_textures(files_updated)
# Dependencies are already reloaded during update paths process
# maya_utils.reload_dependencies(files_updated)
# ==============================================================================================================
# FUNCTIONS
# ==============================================================================================================
def setup_project(self, artella_local_root_path):
"""
Setup Artella local root as current DCC active project
This function should be overridden in the specific DCC plugin implementation
It is not an abstract function because its implementation is not mandatory
:param str artella_local_root_path: current user Artella local root path
"""
if not artella_local_root_path:
logger.warning('No Project Path to setup. Skipping setup project ...')
return
# This can happen when local root path cannot be retrieved from Artella Drive
if isinstance(artella_local_root_path, dict):
return
artella_local_root_path = utils.clean_path(artella_local_root_path)
if utils.is_python2():
artella_local_root_path = artella_local_root_path.decode('utf-8')
artella_local_root_path = cmds.encodeString(artella_local_root_path)
mel.eval('setProject "%s"' % artella_local_root_path.replace('\\', '/'))
cmds.workspace(directory=artella_local_root_path)
cmds.workspace(fileRule=['sourceImages', ''])
cmds.workspace(fileRule=['scene', ''])
cmds.workspace(fileRule=['mayaAscii', ''])
cmds.workspace(fileRule=['mayaBinary', ''])
if utils.is_python2():
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path.encode('utf-8')))
else:
logger.info('Set Maya Workspace Path: {}'.format(artella_local_root_path))
def validate_environment_for_callback(self, callback_name):
"""
Checks that all necessary parts are available before executing a Maya callback
:param str callback_name: name of the callback to validate
"""
logger.info('validate_environment_for_callback for {}'.format(callback_name))
client = self.get_client()
if client:
local_root = cmds.encodeString(client.get_local_root())
if local_root:
# We use this to make sure that Artella environment variable is set
logger.debug('set local root in local environment: {}'.format(local_root))
os.environ[consts.ALR] = local_root
os.putenv(consts.ALR, local_root)
mel.eval('putenv "{}" "{}"'.format(consts.ALR, local_root))
if consts.ALR not in os.environ:
msg = 'Unable to execute Maya "{}" callback, {} is not set in the environment'.format(
callback_name, consts.ALR)
logger.error(msg)
raise Exception(msg)
# ==============================================================================================================
# CALLBACKS
# ==============================================================================================================
def _after_open(self, *args):
"""
Internal callback function that is called once a Maya scene is opened
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterOpen')
def _before_save(self, *args):
"""
Internal callback function that is called before saving a Maya scene
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('BeforeSave')
self.update_paths(show_dialogs=False, skip_save=True)
def _before_open_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya scene is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
file_path = utils.clean_path(maya_file.resolvedFullName())
if self.is_artella_path(file_path):
logger.info('Opening file: "{}"'.format(file_path))
self.validate_environment_for_callback('BeforeOpenCheck')
logger.info('Checking missing dependencies ...')
get_deps_plugin = plugins.get_plugin_by_id('artella-plugins-getdependencies')
if not get_deps_plugin or not get_deps_plugin.is_loaded():
msg = 'Get Dependencies plugin is not loaded. Get dependencies functionality is not available!'
dcc.show_warning('Get Dependencies Plugin not available', msg)
logger.warning(msg)
else:
get_deps_plugin.get_non_available_dependencies(file_path)
return True
def
|
(self, *args):
"""
Internal callback function that is called after a Maya reference is loaded
:param args:
"""
if not self.is_artella_path():
return
self.validate_environment_for_callback('AfterLoadReference')
def _before_reference_check(self, maya_file, client_data=None):
"""
Internal callback function that is called before a Maya reference is opened
:param bool retcode: Flag that indicates if the file can be opened or not
:param MFileObject maya_file: Maya API object that contains info about the file we want to open
:param dict client_data:
"""
if self.is_artella_path():
self.validate_environment_for_callback('BeforeReferenceCheck')
raw_full_name = maya_file.rawFullName()
if not dccplugin.DccPlugin().is_path_translated(
raw_full_name) and dccplugin.DccPlugin().is_artella_path(raw_full_name):
convert_path = dccplugin.DccPlugin().convert_path(raw_full_name)
maya_file.setRawFullName(convert_path)
return True
|
_after_load_reference
|
identifier_name
|
model.py
|
False
After that, the "embeds" are linearly mapped to "gamma" and "bias"
These "gamma" and "bias" are applied to the outputs like in batch normalization
with affine = True (see definition of batch normalization for reference)
"""
def __init__(self, num_features: int, embed_features: int):
super(AdaptiveBatchNorm, self).__init__(num_features, affine=False)
# TODO
self.lin1 = nn.Linear(embed_features, num_features)
self.lin2 = nn.Linear(embed_features, num_features)
def forward(self, inputs, embeds):
gamma = self.lin1(embeds) # TODO
bias = self.lin2(embeds) # TODO
assert gamma.shape[0] == inputs.shape[0] and gamma.shape[1] == inputs.shape[1]
outputs = super().forward(inputs) # TODO: apply batchnorm
return outputs * gamma[..., None, None] + bias[..., None, None]
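# (Usage sketch, added for illustration; the sizes below are assumptions)
#   abn = AdaptiveBatchNorm(num_features=64, embed_features=16)
#   x = torch.randn(8, 64, 32, 32)   # feature maps
#   e = torch.randn(8, 16)           # per-sample embeddings
#   out = abn(x, e)                  # same shape as x; per-sample gamma/bias applied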
class PreActResBlock(nn.Module):
"""
Pre-activation residual block (6 points)
Paper: https://arxiv.org/pdf/1603.05027.pdf
Scheme: materials/preactresblock.png
Review: https://towardsdatascience.com/resnet-with-identity-mapping-over-1000-layers-reached-image-classification-bb50a42af03e
Args:
in_channels: input number of channels
out_channels: output number of channels
batchnorm: this block is with/without adaptive batch normalization
upsample: use nearest neighbours upsampling at the beginning
downsample: use average pooling after the end
in_channels != out_channels:
- first conv: in_channels -> out_channels
- second conv: out_channels -> out_channels
- use 1x1 conv in skip connection
in_channels == out_channels: skip connection is without a conv
"""
def __init__(self,
in_channels: int,
out_channels: int,
embed_channels: int = None,
batchnorm: bool = False,
upsample: bool = False,
downsample: bool = False):
super(PreActResBlock, self).__init__()
# TODO: define pre-activation residual block
# TODO: apply spectral normalization to conv layers
# Don't forget that activation after residual sum cannot be inplace!
self.batchnorm = batchnorm
self.upsample = upsample
self.downsample = downsample
if self.upsample:
self.up = nn.UpsamplingNearest2d(scale_factor=2)
if self.downsample:
self.down = nn.AvgPool2d(kernel_size=2)
self.skip_connection = torch.nn.utils.spectral_norm(nn.Conv2d(in_channels, out_channels, 1))
if self.batchnorm:
self.ad_norm1 = AdaptiveBatchNorm(in_channels, embed_channels)
else:
self.ad_norm1 = nn.Identity()
self.relu1 = nn.ReLU()
self.sp_norm_conv1 = spectral_norm(nn.Conv2d(in_channels, out_channels, 3, padding=1))
if self.batchnorm:
self.ad_norm2 = AdaptiveBatchNorm(out_channels, embed_channels)
else:
self.ad_norm2 = nn.Identity()
self.relu2 = nn.ReLU()
self.sp_norm_conv2 = spectral_norm(nn.Conv2d(out_channels, out_channels, 3, padding=1))
def forward(self,
inputs, # regular features
embeds=None): # embeds used in adaptive batch norm
# TODO
if self.upsample:
inputs = self.up(inputs)
if self.batchnorm:
outputs = self.ad_norm1(inputs, embeds)
else:
outputs = self.ad_norm1(inputs)
outputs = self.sp_norm_conv1(self.relu1(outputs))
if self.batchnorm:
outputs = self.ad_norm2(outputs, embeds)
else:
outputs = self.ad_norm2(outputs)
outputs = self.sp_norm_conv2(self.relu2(outputs))
# skip connection
outputs += self.skip_connection(inputs)
if self.downsample:
outputs = self.down(outputs)
return outputs
class Generator(nn.Module):
"""
Generator network (8 points)
TODO:
- Implement an option to condition the synthesis on trainable class embeddings
(use nn.Embedding module with noise_channels as the size of each embed)
- Concatenate input noise with class embeddings (if use_class_condition = True) to obtain input embeddings
- Linearly map input embeddings into input tensor with the following dims: max_channels x 4 x 4
- Forward an input tensor through a convolutional part,
which consists of num_blocks PreActResBlocks and performs upsampling by a factor of 2 in each block
- Each PreActResBlock is additionally conditioned on the input embeddings (via adaptive batch normalization)
- At the end of the convolutional part apply regular BN, ReLU and Conv as an image prediction head
- Apply spectral norm to all conv and linear layers (not the embedding layer)
- Use Sigmoid at the end to map the outputs into an image
Notes:
- The last convolutional layer should map min_channels to 3. With each upsampling you should decrease
the number of channels by a factor of 2
- Class embeddings are only used and trained if use_class_condition = True
"""
def __init__(self,
min_channels: int,
max_channels: int,
noise_channels: int,
num_classes: int,
num_blocks: int,
use_class_condition: bool):
|
nn.ReLU(),
torch.nn.utils.spectral_norm(nn.Conv2d(min_channels, 3, 3, padding=1)),
nn.Sigmoid()
)
def forward(self, noise, labels):
# TODO
if self.use_class_condition:
noise = torch.cat((self.embed(labels), noise), dim=-1)
outputs = self.sp_norm_lin(noise).view(-1, self.max_channels, 4, 4)
outputs = self.parb1(outputs, noise)
outputs = self.parb2(outputs, noise)
outputs = self.parb3(outputs, noise)
outputs = self.parb4(outputs, noise)
outputs = self.head(outputs)
assert outputs.shape == (noise.shape[0], 3, self.output_size, self.output_size)
return outputs
class Discriminator(nn.Module):
"""
Discriminator network (8 points)
TODO:
- Define a convolutional part of the discriminator similarly to
the generator blocks, but in the inverse order, with downsampling, and
without batch normalization
- At the end of the convolutional part apply ReLU and sum pooling
TODO: implement projection discriminator head (https://arxiv.org/abs/1802.05637)
Scheme: materials/prgan.png
Notation:
- phi is a convolutional part of the discriminator
- psi is a vector
- y is a class embedding
Class embeddings matrix is similar to the generator, shape: num_classes x max_channels
Discriminator outputs a B x 1 matrix of realism scores
Apply spectral norm for all layers (conv, linear, embedding)
"""
def __init__(self,
min_channels: int,
max_channels: int,
num_classes: int,
num_blocks: int,
use_projection_head: bool):
super(Discriminator, self).__init__()
# TODO
self.use_projection_head = use_projection_head
self.head = nn.Sequential(
spectral_norm(nn.Conv2d(3, min_channels, 3, padding=1)),
nn.ReLU(),
nn.BatchNorm2d(min_channels)
)
self.parb1 = PreActResBlock(min_channels, min_channels * 2, downsample=True)
self.parb2 = PreActResBlock(min_channels * 2, min_channels * 4, downsample=True)
self.parb3 = PreActResBlock(min_channels * 4, min_channels * 8
|
super(Generator, self).__init__()
self.output_size = 4 * 2**num_blocks
# TODO
self.max_channels = max_channels
self.use_class_condition = use_class_condition
self.embed = torch.nn.Embedding(num_classes, noise_channels)
if self.use_class_condition:
noise_channels = noise_channels*2
self.sp_norm_lin = torch.nn.utils.spectral_norm(nn.Linear(noise_channels, 4*4*self.max_channels))
self.parb1 = PreActResBlock(self.max_channels, self.max_channels // 2, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb2 = PreActResBlock(self.max_channels // 2, self.max_channels // 4, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb3 = PreActResBlock(self.max_channels // 4, self.max_channels // 8, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb4 = PreActResBlock(self.max_channels // 8, self.max_channels // 16, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.head = nn.Sequential(
nn.BatchNorm2d(min_channels),
|
identifier_body
|
model.py
|
After that, the "embeds" are linearly mapped to "gamma" and "bias"
These "gamma" and "bias" are applied to the outputs like in batch normalization
with affine = True (see definition of batch normalization for reference)
"""
def __init__(self, num_features: int, embed_features: int):
super(AdaptiveBatchNorm, self).__init__(num_features, affine=False)
# TODO
self.lin1 = nn.Linear(embed_features, num_features)
self.lin2 = nn.Linear(embed_features, num_features)
def forward(self, inputs, embeds):
gamma = self.lin1(embeds) # TODO
bias = self.lin2(embeds) # TODO
assert gamma.shape[0] == inputs.shape[0] and gamma.shape[1] == inputs.shape[1]
outputs = super().forward(inputs) # TODO: apply batchnorm
return outputs * gamma[..., None, None] + bias[..., None, None]
class PreActResBlock(nn.Module):
"""
Pre-activation residual block (6 points)
Paper: https://arxiv.org/pdf/1603.05027.pdf
Scheme: materials/preactresblock.png
Review: https://towardsdatascience.com/resnet-with-identity-mapping-over-1000-layers-reached-image-classification-bb50a42af03e
Args:
in_channels: input number of channels
out_channels: output number of channels
batchnorm: this block is with/without adaptive batch normalization
upsample: use nearest neighbours upsampling at the beginning
downsample: use average pooling after the end
in_channels != out_channels:
- first conv: in_channels -> out_channels
- second conv: out_channels -> out_channels
- use 1x1 conv in skip connection
in_channels == out_channels: skip connection is without a conv
"""
def __init__(self,
in_channels: int,
out_channels: int,
embed_channels: int = None,
batchnorm: bool = False,
upsample: bool = False,
downsample: bool = False):
super(PreActResBlock, self).__init__()
# TODO: define pre-activation residual block
# TODO: apply spectral normalization to conv layers
# Don't forget that activation after residual sum cannot be inplace!
self.batchnorm = batchnorm
self.upsample = upsample
self.downsample = downsample
if self.upsample:
self.up = nn.UpsamplingNearest2d(scale_factor=2)
if self.downsample:
self.down = nn.AvgPool2d(kernel_size=2)
self.skip_connection = torch.nn.utils.spectral_norm(nn.Conv2d(in_channels, out_channels, 1))
if self.batchnorm:
self.ad_norm1 = AdaptiveBatchNorm(in_channels, embed_channels)
else:
self.ad_norm1 = nn.Identity()
self.relu1 = nn.ReLU()
self.sp_norm_conv1 = spectral_norm(nn.Conv2d(in_channels, out_channels, 3, padding=1))
if self.batchnorm:
self.ad_norm2 = AdaptiveBatchNorm(out_channels, embed_channels)
else:
self.ad_norm2 = nn.Identity()
self.relu2 = nn.ReLU()
self.sp_norm_conv2 = spectral_norm(nn.Conv2d(out_channels, out_channels, 3, padding=1))
def forward(self,
inputs, # regular features
embeds=None): # embeds used in adaptive batch norm
# TODO
if self.upsample:
inputs = self.up(inputs)
if self.batchnorm:
outputs = self.ad_norm1(inputs, embeds)
else:
outputs = self.ad_norm1(inputs)
outputs = self.sp_norm_conv1(self.relu1(outputs))
if self.batchnorm:
outputs = self.ad_norm2(outputs, embeds)
else:
outputs = self.ad_norm2(outputs)
outputs = self.sp_norm_conv2(self.relu2(outputs))
# skip connection
outputs += self.skip_connection(inputs)
if self.downsample:
|
return outputs
class Generator(nn.Module):
"""
Generator network (8 points)
TODO:
- Implement an option to condition the synthesis on trainable class embeddings
(use nn.Embedding module with noise_channels as the size of each embed)
- Concatenate input noise with class embeddings (if use_class_condition = True) to obtain input embeddings
- Linearly map input embeddings into input tensor with the following dims: max_channels x 4 x 4
- Forward an input tensor through a convolutional part,
which consists of num_blocks PreActResBlocks and performs upsampling by a factor of 2 in each block
- Each PreActResBlock is additionally conditioned on the input embeddings (via adaptive batch normalization)
- At the end of the convolutional part apply regular BN, ReLU and Conv as an image prediction head
- Apply spectral norm to all conv and linear layers (not the embedding layer)
- Use Sigmoid at the end to map the outputs into an image
Notes:
- The last convolutional layer should map min_channels to 3. With each upsampling you should decrease
the number of channels by a factor of 2
- Class embeddings are only used and trained if use_class_condition = True
"""
def __init__(self,
min_channels: int,
max_channels: int,
noise_channels: int,
num_classes: int,
num_blocks: int,
use_class_condition: bool):
super(Generator, self).__init__()
self.output_size = 4 * 2**num_blocks
# TODO
self.max_channels = max_channels
self.use_class_condition = use_class_condition
self.embed = torch.nn.Embedding(num_classes, noise_channels)
if self.use_class_condition:
noise_channels = noise_channels*2
self.sp_norm_lin = torch.nn.utils.spectral_norm(nn.Linear(noise_channels, 4*4*self.max_channels))
self.parb1 = PreActResBlock(self.max_channels, self.max_channels // 2, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb2 = PreActResBlock(self.max_channels // 2, self.max_channels // 4, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb3 = PreActResBlock(self.max_channels // 4, self.max_channels // 8, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb4 = PreActResBlock(self.max_channels // 8, self.max_channels // 16, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.head = nn.Sequential(
nn.BatchNorm2d(min_channels),
nn.ReLU(),
torch.nn.utils.spectral_norm(nn.Conv2d(min_channels, 3, 3, padding=1)),
nn.Sigmoid()
)
def forward(self, noise, labels):
# TODO
if self.use_class_condition:
noise = torch.cat((self.embed(labels), noise), dim=-1)
outputs = self.sp_norm_lin(noise).view(-1, self.max_channels, 4, 4)
outputs = self.parb1(outputs, noise)
outputs = self.parb2(outputs, noise)
outputs = self.parb3(outputs, noise)
outputs = self.parb4(outputs, noise)
outputs = self.head(outputs)
assert outputs.shape == (noise.shape[0], 3, self.output_size, self.output_size)
return outputs
class Discriminator(nn.Module):
"""
Discriminator network (8 points)
TODO:
- Define a convolutional part of the discriminator similarly to
the generator blocks, but in the inverse order, with downsampling, and
without batch normalization
- At the end of the convolutional part apply ReLU and sum pooling
TODO: implement projection discriminator head (https://arxiv.org/abs/1802.05637)
Scheme: materials/prgan.png
Notation:
- phi is a convolutional part of the discriminator
- psi is a vector
- y is a class embedding
Class embeddings matrix is similar to the generator, shape: num_classes x max_channels
Discriminator outputs a B x 1 matrix of realism scores
Apply spectral norm for all layers (conv, linear, embedding)
"""
def __init__(self,
min_channels: int,
max_channels: int,
num_classes: int,
num_blocks: int,
use_projection_head: bool):
super(Discriminator, self).__init__()
# TODO
self.use_projection_head = use_projection_head
self.head = nn.Sequential(
spectral_norm(nn.Conv2d(3, min_channels, 3, padding=1)),
nn.ReLU(),
nn.BatchNorm2d(min_channels)
)
self.parb1 = PreActResBlock(min_channels, min_channels * 2, downsample=True)
self.parb2 = PreActResBlock(min_channels * 2, min_channels * 4, downsample=True)
self.parb3 = PreActResBlock(min_channels * 4, min_channels * 8
|
outputs = self.down(outputs)
|
conditional_block
|
model.py
|
After that, the "embeds" are linearly mapped to "gamma" and "bias"
These "gamma" and "bias" are applied to the outputs like in batch normalization
with affine = True (see definition of batch normalization for reference)
"""
def __init__(self, num_features: int, embed_features: int):
super(AdaptiveBatchNorm, self).__init__(num_features, affine=False)
# TODO
self.lin1 = nn.Linear(embed_features, num_features)
self.lin2 = nn.Linear(embed_features, num_features)
def forward(self, inputs, embeds):
gamma = self.lin1(embeds) # TODO
bias = self.lin2(embeds) # TODO
assert gamma.shape[0] == inputs.shape[0] and gamma.shape[1] == inputs.shape[1]
outputs = super().forward(inputs) # TODO: apply batchnorm
return outputs * gamma[..., None, None] + bias[..., None, None]
class PreActResBlock(nn.Module):
"""
Pre-activation residual block (6 points)
Paper: https://arxiv.org/pdf/1603.05027.pdf
Scheme: materials/preactresblock.png
Review: https://towardsdatascience.com/resnet-with-identity-mapping-over-1000-layers-reached-image-classification-bb50a42af03e
Args:
in_channels: input number of channels
out_channels: output number of channels
batchnorm: this block is with/without adaptive batch normalization
upsample: use nearest neighbours upsampling at the beginning
downsample: use average pooling after the end
in_channels != out_channels:
- first conv: in_channels -> out_channels
- second conv: out_channels -> out_channels
- use 1x1 conv in skip connection
in_channels == out_channels: skip connection is without a conv
"""
def
|
(self,
in_channels: int,
out_channels: int,
embed_channels: int = None,
batchnorm: bool = False,
upsample: bool = False,
downsample: bool = False):
super(PreActResBlock, self).__init__()
# TODO: define pre-activation residual block
# TODO: apply spectral normalization to conv layers
# Don't forget that activation after residual sum cannot be inplace!
self.batchnorm = batchnorm
self.upsample = upsample
self.downsample = downsample
if self.upsample:
self.up = nn.UpsamplingNearest2d(scale_factor=2)
if self.downsample:
self.down = nn.AvgPool2d(kernel_size=2)
self.skip_connection = torch.nn.utils.spectral_norm(nn.Conv2d(in_channels, out_channels, 1))
if self.batchnorm:
self.ad_norm1 = AdaptiveBatchNorm(in_channels, embed_channels)
else:
self.ad_norm1 = nn.Identity()
self.relu1 = nn.ReLU()
self.sp_norm_conv1 = spectral_norm(nn.Conv2d(in_channels, out_channels, 3, padding=1))
if self.batchnorm:
self.ad_norm2 = AdaptiveBatchNorm(out_channels, embed_channels)
else:
self.ad_norm2 = nn.Identity()
self.relu2 = nn.ReLU()
self.sp_norm_conv2 = spectral_norm(nn.Conv2d(out_channels, out_channels, 3, padding=1))
def forward(self,
inputs, # regular features
embeds=None): # embeds used in adaptive batch norm
# TODO
if self.upsample:
inputs = self.up(inputs)
if self.batchnorm:
outputs = self.ad_norm1(inputs, embeds)
else:
outputs = self.ad_norm1(inputs)
outputs = self.sp_norm_conv1(self.relu1(outputs))
if self.batchnorm:
outputs = self.ad_norm2(outputs, embeds)
else:
outputs = self.ad_norm2(outputs)
outputs = self.sp_norm_conv2(self.relu2(outputs))
# skip connection
outputs += self.skip_connection(inputs)
if self.downsample:
outputs = self.down(outputs)
return outputs
class Generator(nn.Module):
"""
Generator network (8 points)
TODO:
- Implement an option to condition the synthesis on trainable class embeddings
(use nn.Embedding module with noise_channels as the size of each embed)
- Concatenate input noise with class embeddings (if use_class_condition = True) to obtain input embeddings
- Linearly map input embeddings into input tensor with the following dims: max_channels x 4 x 4
- Forward an input tensor through a convolutional part,
which consists of num_blocks PreActResBlocks and performs upsampling by a factor of 2 in each block
- Each PreActResBlock is additionally conditioned on the input embeddings (via adaptive batch normalization)
- At the end of the convolutional part apply regular BN, ReLU and Conv as an image prediction head
- Apply spectral norm to all conv and linear layers (not the embedding layer)
- Use Sigmoid at the end to map the outputs into an image
Notes:
- The last convolutional layer should map min_channels to 3. With each upsampling you should decrease
the number of channels by a factor of 2
- Class embeddings are only used and trained if use_class_condition = True
"""
def __init__(self,
min_channels: int,
max_channels: int,
noise_channels: int,
num_classes: int,
num_blocks: int,
use_class_condition: bool):
super(Generator, self).__init__()
self.output_size = 4 * 2**num_blocks
# TODO
self.max_channels = max_channels
self.use_class_condition = use_class_condition
self.embed = torch.nn.Embedding(num_classes, noise_channels)
if self.use_class_condition:
noise_channels = noise_channels*2
self.sp_norm_lin = torch.nn.utils.spectral_norm(nn.Linear(noise_channels, 4*4*self.max_channels))
self.parb1 = PreActResBlock(self.max_channels, self.max_channels // 2, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb2 = PreActResBlock(self.max_channels // 2, self.max_channels // 4, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb3 = PreActResBlock(self.max_channels // 4, self.max_channels // 8, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb4 = PreActResBlock(self.max_channels // 8, self.max_channels // 16, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.head = nn.Sequential(
nn.BatchNorm2d(min_channels),
nn.ReLU(),
torch.nn.utils.spectral_norm(nn.Conv2d(min_channels, 3, 3, padding=1)),
nn.Sigmoid()
)
def forward(self, noise, labels):
# TODO
if self.use_class_condition:
noise = torch.cat((self.embed(labels), noise), dim=-1)
outputs = self.sp_norm_lin(noise).view(-1, self.max_channels, 4, 4)
outputs = self.parb1(outputs, noise)
outputs = self.parb2(outputs, noise)
outputs = self.parb3(outputs, noise)
outputs = self.parb4(outputs, noise)
outputs = self.head(outputs)
assert outputs.shape == (noise.shape[0], 3, self.output_size, self.output_size)
return outputs
class Discriminator(nn.Module):
"""
Discriminator network (8 points)
TODO:
- Define a convolutional part of the discriminator similarly to
the generator blocks, but in the inverse order, with downsampling, and
without batch normalization
- At the end of the convolutional part apply ReLU and sum pooling
TODO: implement projection discriminator head (https://arxiv.org/abs/1802.05637)
Scheme: materials/prgan.png
Notation:
- phi is a convolutional part of the discriminator
- psi is a vector
- y is a class embedding
Class embeddings matrix is similar to the generator, shape: num_classes x max_channels
Discriminator outputs a B x 1 matrix of realism scores
Apply spectral norm for all layers (conv, linear, embedding)
"""
def __init__(self,
min_channels: int,
max_channels: int,
num_classes: int,
num_blocks: int,
use_projection_head: bool):
super(Discriminator, self).__init__()
# TODO
self.use_projection_head = use_projection_head
self.head = nn.Sequential(
spectral_norm(nn.Conv2d(3, min_channels, 3, padding=1)),
nn.ReLU(),
nn.BatchNorm2d(min_channels)
)
self.parb1 = PreActResBlock(min_channels, min_channels * 2, downsample=True)
self.parb2 = PreActResBlock(min_channels * 2, min_channels * 4, downsample=True)
self.parb3 = PreActResBlock(min_channels * 4, min_channels * 8
|
__init__
|
identifier_name
|
model.py
|
False
After that, the "embeds" are linearly mapped to "gamma" and "bias"
These "gamma" and "bias" are applied to the outputs like in batch normalization
with affine = True (see definition of batch normalization for reference)
"""
def __init__(self, num_features: int, embed_features: int):
super(AdaptiveBatchNorm, self).__init__(num_features, affine=False)
# TODO
self.lin1 = nn.Linear(embed_features, num_features)
self.lin2 = nn.Linear(embed_features, num_features)
def forward(self, inputs, embeds):
gamma = self.lin1(embeds) # TODO
bias = self.lin2(embeds) # TODO
assert gamma.shape[0] == inputs.shape[0] and gamma.shape[1] == inputs.shape[1]
outputs = super().forward(inputs) # TODO: apply batchnorm
return outputs * gamma[..., None, None] + bias[..., None, None]
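# Hedged usage sketch (not part of the original assignment code): exercises
# AdaptiveBatchNorm with made-up shapes to show how the per-sample gamma/bias
# predicted from the embeddings modulate the normalized feature maps.
def _adaptive_bn_shape_check():
    abn = AdaptiveBatchNorm(num_features=16, embed_features=32)
    x = torch.randn(8, 16, 4, 4)  # B x C x H x W feature maps
    e = torch.randn(8, 32)        # one 32-dim embedding per sample
    out = abn(x, e)               # normalized, then scaled/shifted per sample
    assert out.shape == x.shape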
class PreActResBlock(nn.Module):
"""
Pre-activation residual block (6 points)
Paper: https://arxiv.org/pdf/1603.05027.pdf
Scheme: materials/preactresblock.png
Review: https://towardsdatascience.com/resnet-with-identity-mapping-over-1000-layers-reached-image-classification-bb50a42af03e
Args:
in_channels: input number of channels
out_channels: output number of channels
batchnorm: whether to use adaptive batch normalization in this block
upsample: apply nearest-neighbour upsampling at the beginning of the block
downsample: apply average pooling at the end of the block
in_channels != out_channels:
- first conv: in_channels -> out_channels
- second conv: out_channels -> out_channels
- use 1x1 conv in skip connection
in_channels == out_channels: skip connection is without a conv
"""
def __init__(self,
in_channels: int,
out_channels: int,
embed_channels: int = None,
batchnorm: bool = False,
upsample: bool = False,
downsample: bool = False):
super(PreActResBlock, self).__init__()
# TODO: define pre-activation residual block
# TODO: apply spectral normalization to conv layers
# Don't forget that activation after residual sum cannot be inplace!
self.batchnorm = batchnorm
self.upsample = upsample
self.downsample = downsample
if self.upsample:
self.up = nn.UpsamplingNearest2d(scale_factor=2)
if self.downsample:
self.down = nn.AvgPool2d(kernel_size=2)
self.skip_connection = torch.nn.utils.spectral_norm(nn.Conv2d(in_channels, out_channels, 1))
if self.batchnorm:
self.ad_norm1 = AdaptiveBatchNorm(in_channels, embed_channels)
else:
self.ad_norm1 = nn.Identity()
self.relu1 = nn.ReLU()
self.sp_norm_conv1 = spectral_norm(nn.Conv2d(in_channels, out_channels, 3, padding=1))
if self.batchnorm:
self.ad_norm2 = AdaptiveBatchNorm(out_channels, embed_channels)
else:
self.ad_norm2 = nn.Identity()
self.relu2 = nn.ReLU()
self.sp_norm_conv2 = spectral_norm(nn.Conv2d(out_channels, out_channels, 3, padding=1))
def forward(self,
inputs, # regular features
embeds=None): # embeds used in adaptive batch norm
# TODO
if self.upsample:
inputs = self.up(inputs)
if self.batchnorm:
outputs = self.ad_norm1(inputs, embeds)
else:
outputs = self.ad_norm1(inputs)
outputs = self.sp_norm_conv1(self.relu1(outputs))
if self.batchnorm:
outputs = self.ad_norm2(outputs, embeds)
else:
outputs = self.ad_norm2(outputs)
outputs = self.sp_norm_conv2(self.relu2(outputs))
# skip connection
outputs += self.skip_connection(inputs)
if self.downsample:
outputs = self.down(outputs)
return outputs
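# Hedged shape sketch (illustrative only): with upsample=True the block doubles the
# spatial resolution and maps in_channels to out_channels; the embeddings are only
# consumed when batchnorm=True (adaptive batch normalization).
def _preact_block_shape_check():
    block = PreActResBlock(64, 32, embed_channels=128, batchnorm=True, upsample=True)
    x = torch.randn(4, 64, 8, 8)
    e = torch.randn(4, 128)
    y = block(x, e)
    assert y.shape == (4, 32, 16, 16)  # 8x8 -> 16x16, 64 -> 32 channels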
class Generator(nn.Module):
"""
Generator network (8 points)
TODO:
- Implement an option to condition the synthesis on trainable class embeddings
(use nn.Embedding module with noise_channels as the size of each embed)
- Concatenate input noise with class embeddings (if use_class_condition = True) to obtain input embeddings
- Linearly map input embeddings into input tensor with the following dims: max_channels x 4 x 4
- Forward an input tensor through a convolutional part,
which consists of num_blocks PreActResBlocks and performs upsampling by a factor of 2 in each block
- Each PreActResBlock is additionally conditioned on the input embeddings (via adaptive batch normalization)
- At the end of the convolutional part apply regular BN, ReLU and Conv as an image prediction head
- Apply spectral norm to all conv and linear layers (not the embedding layer)
- Use Sigmoid at the end to map the outputs into an image
Notes:
- The last convolutional layer should map min_channels to 3. With each upsampling you should decrease
the number of channels by a factor of 2
- Class embeddings are only used and trained if use_class_condition = True
"""
def __init__(self,
min_channels: int,
max_channels: int,
noise_channels: int,
num_classes: int,
num_blocks: int,
use_class_condition: bool):
super(Generator, self).__init__()
self.output_size = 4 * 2**num_blocks
# TODO
self.max_channels = max_channels
self.use_class_condition = use_class_condition
self.embed = torch.nn.Embedding(num_classes, noise_channels)
if self.use_class_condition:
noise_channels = noise_channels*2
self.sp_norm_lin = torch.nn.utils.spectral_norm(nn.Linear(noise_channels, 4*4*self.max_channels))
self.parb1 = PreActResBlock(self.max_channels, self.max_channels // 2, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb2 = PreActResBlock(self.max_channels // 2, self.max_channels // 4, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb3 = PreActResBlock(self.max_channels // 4, self.max_channels // 8, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.parb4 = PreActResBlock(self.max_channels // 8, self.max_channels // 16, embed_channels=noise_channels, batchnorm=self.use_class_condition, upsample=True)
self.head = nn.Sequential(
nn.BatchNorm2d(min_channels),
nn.ReLU(),
torch.nn.utils.spectral_norm(nn.Conv2d(min_channels, 3, 3, padding=1)),
nn.Sigmoid()
)
def forward(self, noise, labels):
# TODO
if self.use_class_condition:
noise = torch.cat((self.embed(labels), noise), dim=-1)
outputs = self.sp_norm_lin(noise).view(-1, self.max_channels, 4, 4)
outputs = self.parb1(outputs, noise)
outputs = self.parb2(outputs, noise)
outputs = self.parb3(outputs, noise)
outputs = self.parb4(outputs, noise)
outputs = self.head(outputs)
assert outputs.shape == (noise.shape[0], 3, self.output_size, self.output_size)
return outputs
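# Hedged usage sketch (illustrative only): sampling a batch of class-conditional
# images. The channel and noise sizes are made up; with num_blocks = 4 the output
# resolution is 4 * 2**4 = 64.
def _generator_sample_check():
    gen = Generator(min_channels=32, max_channels=512, noise_channels=128,
                    num_classes=10, num_blocks=4, use_class_condition=True)
    noise = torch.randn(4, 128)
    labels = torch.randint(0, 10, (4,))
    images = gen(noise, labels)
    assert images.shape == (4, 3, 64, 64)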
class Discriminator(nn.Module):
|
TODO:
- Define a convolutional part of the discriminator similarly to
the generator blocks, but in the inverse order, with downsampling, and
without batch normalization
- At the end of the convolutional part apply ReLU and sum pooling
TODO: implement projection discriminator head (https://arxiv.org/abs/1802.05637)
Scheme: materials/prgan.png
Notation:
- phi is a convolutional part of the discriminator
- psi is a vector
- y is a class embedding
Class embeddings matrix is similar to the generator, shape: num_classes x max_channels
Discriminator outputs a B x 1 matrix of realism scores
Apply spectral norm for all layers (conv, linear, embedding)
"""
def __init__(self,
min_channels: int,
max_channels: int,
num_classes: int,
num_blocks: int,
use_projection_head: bool):
super(Discriminator, self).__init__()
# TODO
self.use_projection_head = use_projection_head
self.head = nn.Sequential(
spectral_norm(nn.Conv2d(3, min_channels, 3, padding=1)),
nn.ReLU(),
nn.BatchNorm2d(min_channels)
)
self.parb1 = PreActResBlock(min_channels, min_channels * 2, downsample=True)
self.parb2 = PreActResBlock(min_channels * 2, min_channels * 4, downsample=True)
self.parb3 = PreActResBlock(min_channels * 4, min_channels * 8
|
"""
Discriminator network (8 points)
|
random_line_split
|
simpleLSTM.py
|
_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'W')] = W
U = numpy.concatenate([ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj']),
ortho_weight(options['dim_proj'])], axis=1)
params[_p(prefix, 'U')] = U
b = numpy.zeros((4 * options['dim_proj'],)) # (4*dim)
b[options['dim_proj']: 2 * options['dim_proj']] = 5
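# The concatenated W/U/b blocks are laid out gate-by-gate as [i, f, o, c]
# (see _slice in lstm_layer below), so b[dim:2*dim] is the forget-gate bias;
# initializing it to a large positive value keeps the forget gate open and
# preserves the cell state early in training.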
params[_p(prefix, 'b')] = b.astype(theano.config.floatX)
return params
def init_params(options):
"""
程序用到的全局变量,以有序字典的方式存放在params中
Wemb 是
"""
params = OrderedDict()
# parameters of the LSTM layer
params = param_init_lstm(options,
params,
prefix=options['encoder'])
# parameters of the output layer
params['U'] = 0.1 * numpy.random.randn(options['dim_proj'],
options['ydim']).astype(theano.config.floatX)
params['b'] = numpy.zeros((options['ydim'],)).astype(theano.config.floatX)
return params
def init_tparams(params):
tparams = OrderedDict()
for kk, pp in params.iteritems():
tparams[kk] = theano.shared(params[kk], name=kk)
return tparams
def lstm_layer(tparams, x_sequence, options, prefix='lstm'):
'''
Run the LSTM recurrence over x_sequence (time along the first axis) and
return the sequence of hidden states h, one per time step.
'''
nsteps = x_sequence.shape[0]
# (n_t, 4*dim)
state_below = (T.dot(x_sequence, tparams[_p(prefix, 'W')]) +
tparams[_p(prefix, 'b')])
dim_proj = options['dim_proj']
def _slice(_x, n, dim):
if _x.ndim == 3:
return _x[:, :, n * dim:(n + 1) * dim]
return _x[:, n * dim:(n + 1) * dim]
def _step(x_, h_, c_):
'''
x_ : input to the unit at this time step: W x + b
h_ : output (hidden state) of the unit at the previous time step
|
preact = T.dot(h_, tparams[_p(prefix, 'U')]) # (4*dim)
preact += x_ # the projection of the previous h has the same dimensionality as W x + b (4 * number of LSTM units), so the two can be added directly
i = T.nnet.sigmoid(_slice(preact, 0, dim_proj)) # input gate
f = T.nnet.sigmoid(_slice(preact, 1, dim_proj)) # forget gate
o = T.nnet.sigmoid(_slice(preact, 2, dim_proj)) # output gate
c = T.tanh(_slice(preact, 3, dim_proj)) # cell state pre
c = f * c_ + i * c # cell state
h = o * T.tanh(c) # unit output
return h, c
out_h = theano.shared(numpy.zeros((1,dim_proj), dtype=theano.config.floatX), name="out_h")
out_c = theano.shared(numpy.zeros((1,dim_proj), dtype=theano.config.floatX), name="out_c")
rval, updates = theano.scan(_step,
sequences=state_below,
outputs_info=[out_h, out_c],
name=_p(prefix, '_layers'),
n_steps=nsteps)
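# rval = [h_sequence, c_sequence]; rval[0] stacks the hidden states over time
# and has shape (nsteps, 1, dim_proj), which build_model later reshapes to
# (nsteps, dim_proj).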
return rval[0]
def build_model(tparams, options):
# Used for dropout.
use_noise = theano.shared(numpy_floatX(0.))
# the first dimension of the variables below is time
x = T.matrix()
y = T.vector()
proj = lstm_layer(tparams, x, options,
prefix=options['encoder'])
proj = theano.tensor.reshape(proj, (proj.shape[0], proj.shape[2]))
# pred = T.tanh(T.dot(proj, tparams['U']) + tparams['b'])
pred = T.dot(proj, tparams['U']) + tparams['b']
f_pred_prob = theano.function([x], pred, name='f_pred_prob')
#
# off = 1e-8
# if pred.dtype == 'float16':
# off = 1e-6
pred = theano.tensor.flatten(pred)
cost = ((pred - y)**2).sum()
return use_noise, x, y, f_pred_prob, cost
def adadelta(lr, tparams, grads, x, y, cost):
"""
An adaptive learning rate optimizer
Parameters
----------
lr : Theano SharedVariable
Initial learning rate
tparams: Theano SharedVariable
Model parameters
grads: Theano variable
Gradients of the cost w.r.t. the parameters
x: Theano variable
Model inputs
y: Theano variable
Targets
cost: Theano variable
Objective function to minimize
Notes
-----
For more information, see [ADADELTA]_.
.. [ADADELTA] Matthew D. Zeiler, *ADADELTA: An Adaptive Learning
Rate Method*, arXiv:1212.5701.
"""
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.iteritems()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.iteritems()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.iteritems()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
updates_1 = zgup + rg2up
f_grad_shared = theano.function([x, y], cost, updates=updates_1,
name='adadelta_f_grad_shared',
mode='FAST_COMPILE')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
updates_2 = ru2up + param_up
f_update = theano.function([lr], [], updates=updates_2,
on_unused_input='ignore',
name='adadelta_f_update',
mode='FAST_COMPILE')
return updates_1, updates_2, f_grad_shared, f_update
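# Hedged usage sketch (illustrative, not from the original script): how build_model
# and adadelta are typically wired together for training.
#     tparams = init_tparams(init_params(options))
#     use_noise, x, y, f_pred_prob, cost = build_model(tparams, options)
#     grads = T.grad(cost, wrt=list(tparams.values()))
#     lr = T.scalar(name='lr')
#     _, _, f_grad_shared, f_update = adadelta(lr, tparams, grads, x, y, cost)
#     for x_batch, y_batch in batches:  # user-supplied minibatch iterator
#         cost_val = f_grad_shared(x_batch, y_batch)
#         f_update(lrate)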
def train_lstm(
dim_proj=40, # input dimensionality of x; equal to the number of LSTM units
patience=10, # Number of epochs to wait before early stop if no progress
max_epochs=1500, # The maximum number of epochs to run
dispFreq=10, # Display to stdout the training progress every N updates
decay_c=0., # Weight decay for the classifier applied to the U weights.
lrate=0.0001, # Learning rate for sgd (not used for adadelta and rmsprop)
n_words=10000, # Vocabulary size
optimizer=adadelta, # sgd, adadelta and rmsprop available; sgd is very hard to use, not recommended (probably needs momentum and a decaying learning rate).
encoder='lstm', # TODO: can be removed; must be 'lstm'.
saveto='lstm_model.npz', # The best model will be saved there
validFreq=370, # Compute the validation error after this number of update.
saveFreq=1110, # Save the parameters after every saveFreq updates
maxlen=100, # Sequences longer than this get ignored
batch_size=16, # The batch size during training.
valid_batch_size=64, # The batch size used for validation/test set.
dataset='imdb',
# Parameter for extra option
noise_std=0.,
use_dropout=True, # if False slightly faster, but worse test error
# This frequently needs a bigger
|
c_ : cell state of the unit at the previous time step
'''
|
conditional_block
|