repo_name
stringlengths 6
100
| path
stringlengths 4
294
| copies
stringlengths 1
5
| size
stringlengths 4
6
| content
stringlengths 606
896k
| license
stringclasses 15
values |
---|---|---|---|---|---|
pedrobaeza/OpenUpgrade
|
addons/base_import_module/tests/test_module/__openerp__.py
|
377
|
1290
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Test Module',
'category': 'Website',
'summary': 'Custom',
'version': '1.0',
'description': """
Test
""",
'author': 'OpenERP SA',
'depends': ['website'],
'data': [
'test.xml',
],
'installable': True,
'application': True,
}
|
agpl-3.0
|
wskplho/sl4a
|
python-build/python-libs/python-twitter/twitter.py
|
89
|
70251
|
#!/usr/bin/python2.4
#
# Copyright 2007 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A library that provides a python interface to the Twitter API'''
__author__ = 'dewitt@google.com'
__version__ = '0.7-devel'
import base64
import calendar
import httplib
import os
import rfc822
import simplejson
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
try:
from hashlib import md5
except ImportError:
from md5 import md5
CHARACTER_LIMIT = 140
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
class TwitterError(Exception):
'''Base class for Twitter errors'''
@property
def message(self):
'''Returns the first argument used to construct this error.'''
return self.args[0]
class Status(object):
'''A class representing the Status structure used by the twitter API.
The Status structure exposes the following properties:
status.created_at
status.created_at_in_seconds # read only
status.favorited
status.in_reply_to_screen_name
status.in_reply_to_user_id
status.in_reply_to_status_id
status.truncated
status.source
status.id
status.text
status.relative_created_at # read only
status.user
'''
def __init__(self,
created_at=None,
favorited=None,
id=None,
text=None,
user=None,
in_reply_to_screen_name=None,
in_reply_to_user_id=None,
in_reply_to_status_id=None,
truncated=None,
source=None,
now=None):
'''An object to hold a Twitter status message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
created_at: The time this status message was posted
favorited: Whether this is a favorite of the authenticated user
id: The unique id of this status message
text: The text of this status message
relative_created_at:
A human readable string representing the posting time
user:
A twitter.User instance representing the person posting the message
now:
The current time, if the client choses to set it. Defaults to the
wall clock time.
'''
self.created_at = created_at
self.favorited = favorited
self.id = id
self.text = text
self.user = user
self.now = now
self.in_reply_to_screen_name = in_reply_to_screen_name
self.in_reply_to_user_id = in_reply_to_user_id
self.in_reply_to_status_id = in_reply_to_status_id
self.truncated = truncated
self.source = source
def GetCreatedAt(self):
'''Get the time this status message was posted.
Returns:
The time this status message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this status message was posted.
Args:
created_at: The time this status message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The time this status message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this status message was posted, in seconds since the epoch.
Returns:
The time this status message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc="The time this status message was "
"posted, in seconds since the epoch")
def GetFavorited(self):
'''Get the favorited setting of this status message.
Returns:
True if this status message is favorited; False otherwise
'''
return self._favorited
def SetFavorited(self, favorited):
'''Set the favorited state of this status message.
Args:
favorited: boolean True/False favorited state of this status message
'''
self._favorited = favorited
favorited = property(GetFavorited, SetFavorited,
doc='The favorited state of this status message.')
def GetId(self):
'''Get the unique id of this status message.
Returns:
The unique id of this status message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this status message.
Args:
id: The unique id of this status message
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this status message.')
def GetInReplyToScreenName(self):
return self._in_reply_to_screen_name
def SetInReplyToScreenName(self, in_reply_to_screen_name):
self._in_reply_to_screen_name = in_reply_to_screen_name
in_reply_to_screen_name = property(GetInReplyToScreenName, SetInReplyToScreenName,
doc='')
def GetInReplyToUserId(self):
return self._in_reply_to_user_id
def SetInReplyToUserId(self, in_reply_to_user_id):
self._in_reply_to_user_id = in_reply_to_user_id
in_reply_to_user_id = property(GetInReplyToUserId, SetInReplyToUserId,
doc='')
def GetInReplyToStatusId(self):
return self._in_reply_to_status_id
def SetInReplyToStatusId(self, in_reply_to_status_id):
self._in_reply_to_status_id = in_reply_to_status_id
in_reply_to_status_id = property(GetInReplyToStatusId, SetInReplyToStatusId,
doc='')
def GetTruncated(self):
return self._truncated
def SetTruncated(self, truncated):
self._truncated = truncated
truncated = property(GetTruncated, SetTruncated,
doc='')
def GetSource(self):
return self._source
def SetSource(self, source):
self._source = source
source = property(GetSource, SetSource,
doc='')
def GetText(self):
'''Get the text of this status message.
Returns:
The text of this status message.
'''
return self._text
def SetText(self, text):
'''Set the text of this status message.
Args:
text: The text of this status message
'''
self._text = text
text = property(GetText, SetText,
doc='The text of this status message')
def GetRelativeCreatedAt(self):
'''Get a human redable string representing the posting time
Returns:
A human readable string representing the posting time
'''
fudge = 1.25
delta = long(self.now) - long(self.created_at_in_seconds)
if delta < (1 * fudge):
return 'about a second ago'
elif delta < (60 * (1/fudge)):
return 'about %d seconds ago' % (delta)
elif delta < (60 * fudge):
return 'about a minute ago'
elif delta < (60 * 60 * (1/fudge)):
return 'about %d minutes ago' % (delta / 60)
elif delta < (60 * 60 * fudge):
return 'about an hour ago'
elif delta < (60 * 60 * 24 * (1/fudge)):
return 'about %d hours ago' % (delta / (60 * 60))
elif delta < (60 * 60 * 24 * fudge):
return 'about a day ago'
else:
return 'about %d days ago' % (delta / (60 * 60 * 24))
relative_created_at = property(GetRelativeCreatedAt,
doc='Get a human readable string representing'
'the posting time')
def GetUser(self):
'''Get a twitter.User reprenting the entity posting this status message.
Returns:
A twitter.User reprenting the entity posting this status message
'''
return self._user
def SetUser(self, user):
'''Set a twitter.User reprenting the entity posting this status message.
Args:
user: A twitter.User reprenting the entity posting this status message
'''
self._user = user
user = property(GetUser, SetUser,
doc='A twitter.User reprenting the entity posting this '
'status message')
def GetNow(self):
'''Get the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Returns:
Whatever the status instance believes the current time to be,
in seconds since the epoch.
'''
if self._now is None:
self._now = time.time()
return self._now
def SetNow(self, now):
'''Set the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Args:
now: The wallclock time for this instance.
'''
self._now = now
now = property(GetNow, SetNow,
doc='The wallclock time for this status instance.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.created_at == other.created_at and \
self.id == other.id and \
self.text == other.text and \
self.user == other.user and \
self.in_reply_to_screen_name == other.in_reply_to_screen_name and \
self.in_reply_to_user_id == other.in_reply_to_user_id and \
self.in_reply_to_status_id == other.in_reply_to_status_id and \
self.truncated == other.truncated and \
self.favorited == other.favorited and \
self.source == other.source
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.Status instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.Status instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.Status instance.
Returns:
A JSON string representation of this twitter.Status instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.Status instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.Status instance
'''
data = {}
if self.created_at:
data['created_at'] = self.created_at
if self.favorited:
data['favorited'] = self.favorited
if self.id:
data['id'] = self.id
if self.text:
data['text'] = self.text
if self.user:
data['user'] = self.user.AsDict()
if self.in_reply_to_screen_name:
data['in_reply_to_screen_name'] = self.in_reply_to_screen_name
if self.in_reply_to_user_id:
data['in_reply_to_user_id'] = self.in_reply_to_user_id
if self.in_reply_to_status_id:
data['in_reply_to_status_id'] = self.in_reply_to_status_id
if self.truncated is not None:
data['truncated'] = self.truncated
if self.favorited is not None:
data['favorited'] = self.favorited
if self.source:
data['source'] = self.source
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Status instance
'''
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
else:
user = None
return Status(created_at=data.get('created_at', None),
favorited=data.get('favorited', None),
id=data.get('id', None),
text=data.get('text', None),
in_reply_to_screen_name=data.get('in_reply_to_screen_name', None),
in_reply_to_user_id=data.get('in_reply_to_user_id', None),
in_reply_to_status_id=data.get('in_reply_to_status_id', None),
truncated=data.get('truncated', None),
source=data.get('source', None),
user=user)
class User(object):
'''A class representing the User structure used by the twitter API.
The User structure exposes the following properties:
user.id
user.name
user.screen_name
user.location
user.description
user.profile_image_url
user.profile_background_tile
user.profile_background_image_url
user.profile_sidebar_fill_color
user.profile_background_color
user.profile_link_color
user.profile_text_color
user.protected
user.utc_offset
user.time_zone
user.url
user.status
user.statuses_count
user.followers_count
user.friends_count
user.favourites_count
'''
def __init__(self,
id=None,
name=None,
screen_name=None,
location=None,
description=None,
profile_image_url=None,
profile_background_tile=None,
profile_background_image_url=None,
profile_sidebar_fill_color=None,
profile_background_color=None,
profile_link_color=None,
profile_text_color=None,
protected=None,
utc_offset=None,
time_zone=None,
followers_count=None,
friends_count=None,
statuses_count=None,
favourites_count=None,
url=None,
status=None):
self.id = id
self.name = name
self.screen_name = screen_name
self.location = location
self.description = description
self.profile_image_url = profile_image_url
self.profile_background_tile = profile_background_tile
self.profile_background_image_url = profile_background_image_url
self.profile_sidebar_fill_color = profile_sidebar_fill_color
self.profile_background_color = profile_background_color
self.profile_link_color = profile_link_color
self.profile_text_color = profile_text_color
self.protected = protected
self.utc_offset = utc_offset
self.time_zone = time_zone
self.followers_count = followers_count
self.friends_count = friends_count
self.statuses_count = statuses_count
self.favourites_count = favourites_count
self.url = url
self.status = status
def GetId(self):
'''Get the unique id of this user.
Returns:
The unique id of this user
'''
return self._id
def SetId(self, id):
'''Set the unique id of this user.
Args:
id: The unique id of this user.
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this user.')
def GetName(self):
'''Get the real name of this user.
Returns:
The real name of this user
'''
return self._name
def SetName(self, name):
'''Set the real name of this user.
Args:
name: The real name of this user
'''
self._name = name
name = property(GetName, SetName,
doc='The real name of this user.')
def GetScreenName(self):
'''Get the short username of this user.
Returns:
The short username of this user
'''
return self._screen_name
def SetScreenName(self, screen_name):
'''Set the short username of this user.
Args:
screen_name: the short username of this user
'''
self._screen_name = screen_name
screen_name = property(GetScreenName, SetScreenName,
doc='The short username of this user.')
def GetLocation(self):
'''Get the geographic location of this user.
Returns:
The geographic location of this user
'''
return self._location
def SetLocation(self, location):
'''Set the geographic location of this user.
Args:
location: The geographic location of this user
'''
self._location = location
location = property(GetLocation, SetLocation,
doc='The geographic location of this user.')
def GetDescription(self):
'''Get the short text description of this user.
Returns:
The short text description of this user
'''
return self._description
def SetDescription(self, description):
'''Set the short text description of this user.
Args:
description: The short text description of this user
'''
self._description = description
description = property(GetDescription, SetDescription,
doc='The short text description of this user.')
def GetUrl(self):
'''Get the homepage url of this user.
Returns:
The homepage url of this user
'''
return self._url
def SetUrl(self, url):
'''Set the homepage url of this user.
Args:
url: The homepage url of this user
'''
self._url = url
url = property(GetUrl, SetUrl,
doc='The homepage url of this user.')
def GetProfileImageUrl(self):
'''Get the url of the thumbnail of this user.
Returns:
The url of the thumbnail of this user
'''
return self._profile_image_url
def SetProfileImageUrl(self, profile_image_url):
'''Set the url of the thumbnail of this user.
Args:
profile_image_url: The url of the thumbnail of this user
'''
self._profile_image_url = profile_image_url
profile_image_url= property(GetProfileImageUrl, SetProfileImageUrl,
doc='The url of the thumbnail of this user.')
def GetProfileBackgroundTile(self):
'''Boolean for whether to tile the profile background image.
Returns:
True if the background is to be tiled, False if not, None if unset.
'''
return self._profile_background_tile
def SetProfileBackgroundTile(self, profile_background_tile):
'''Set the boolean flag for whether to tile the profile background image.
Args:
profile_background_tile: Boolean flag for whether to tile or not.
'''
self._profile_background_tile = profile_background_tile
profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile,
doc='Boolean for whether to tile the background image.')
def GetProfileBackgroundImageUrl(self):
return self._profile_background_image_url
def SetProfileBackgroundImageUrl(self, profile_background_image_url):
self._profile_background_image_url = profile_background_image_url
profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl,
doc='The url of the profile background of this user.')
def GetProfileSidebarFillColor(self):
return self._profile_sidebar_fill_color
def SetProfileSidebarFillColor(self, profile_sidebar_fill_color):
self._profile_sidebar_fill_color = profile_sidebar_fill_color
profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor)
def GetProfileBackgroundColor(self):
return self._profile_background_color
def SetProfileBackgroundColor(self, profile_background_color):
self._profile_background_color = profile_background_color
profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor)
def GetProfileLinkColor(self):
return self._profile_link_color
def SetProfileLinkColor(self, profile_link_color):
self._profile_link_color = profile_link_color
profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor)
def GetProfileTextColor(self):
return self._profile_text_color
def SetProfileTextColor(self, profile_text_color):
self._profile_text_color = profile_text_color
profile_text_color = property(GetProfileTextColor, SetProfileTextColor)
def GetProtected(self):
return self._protected
def SetProtected(self, protected):
self._protected = protected
protected = property(GetProtected, SetProtected)
def GetUtcOffset(self):
return self._utc_offset
def SetUtcOffset(self, utc_offset):
self._utc_offset = utc_offset
utc_offset = property(GetUtcOffset, SetUtcOffset)
def GetTimeZone(self):
'''Returns the current time zone string for the user.
Returns:
The descriptive time zone string for the user.
'''
return self._time_zone
def SetTimeZone(self, time_zone):
'''Sets the user's time zone string.
Args:
time_zone: The descriptive time zone to assign for the user.
'''
self._time_zone = time_zone
time_zone = property(GetTimeZone, SetTimeZone)
def GetStatus(self):
'''Get the latest twitter.Status of this user.
Returns:
The latest twitter.Status of this user
'''
return self._status
def SetStatus(self, status):
'''Set the latest twitter.Status of this user.
Args:
status: The latest twitter.Status of this user
'''
self._status = status
status = property(GetStatus, SetStatus,
doc='The latest twitter.Status of this user.')
def GetFriendsCount(self):
'''Get the friend count for this user.
Returns:
The number of users this user has befriended.
'''
return self._friends_count
def SetFriendsCount(self, count):
'''Set the friend count for this user.
Args:
count: The number of users this user has befriended.
'''
self._friends_count = count
friends_count = property(GetFriendsCount, SetFriendsCount,
doc='The number of friends for this user.')
def GetFollowersCount(self):
'''Get the follower count for this user.
Returns:
The number of users following this user.
'''
return self._followers_count
def SetFollowersCount(self, count):
'''Set the follower count for this user.
Args:
count: The number of users following this user.
'''
self._followers_count = count
followers_count = property(GetFollowersCount, SetFollowersCount,
doc='The number of users following this user.')
def GetStatusesCount(self):
'''Get the number of status updates for this user.
Returns:
The number of status updates for this user.
'''
return self._statuses_count
def SetStatusesCount(self, count):
'''Set the status update count for this user.
Args:
count: The number of updates for this user.
'''
self._statuses_count = count
statuses_count = property(GetStatusesCount, SetStatusesCount,
doc='The number of updates for this user.')
def GetFavouritesCount(self):
'''Get the number of favourites for this user.
Returns:
The number of favourites for this user.
'''
return self._favourites_count
def SetFavouritesCount(self, count):
'''Set the favourite count for this user.
Args:
count: The number of favourites for this user.
'''
self._favourites_count = count
favourites_count = property(GetFavouritesCount, SetFavouritesCount,
doc='The number of favourites for this user.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.name == other.name and \
self.screen_name == other.screen_name and \
self.location == other.location and \
self.description == other.description and \
self.profile_image_url == other.profile_image_url and \
self.profile_background_tile == other.profile_background_tile and \
self.profile_background_image_url == other.profile_background_image_url and \
self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \
self.profile_background_color == other.profile_background_color and \
self.profile_link_color == other.profile_link_color and \
self.profile_text_color == other.profile_text_color and \
self.protected == other.protected and \
self.utc_offset == other.utc_offset and \
self.time_zone == other.time_zone and \
self.url == other.url and \
self.statuses_count == other.statuses_count and \
self.followers_count == other.followers_count and \
self.favourites_count == other.favourites_count and \
self.friends_count == other.friends_count and \
self.status == other.status
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.User instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.User instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.User instance.
Returns:
A JSON string representation of this twitter.User instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.User instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.User instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.screen_name:
data['screen_name'] = self.screen_name
if self.location:
data['location'] = self.location
if self.description:
data['description'] = self.description
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.profile_background_tile is not None:
data['profile_background_tile'] = self.profile_background_tile
if self.profile_background_image_url:
data['profile_sidebar_fill_color'] = self.profile_background_image_url
if self.profile_background_color:
data['profile_background_color'] = self.profile_background_color
if self.profile_link_color:
data['profile_link_color'] = self.profile_link_color
if self.profile_text_color:
data['profile_text_color'] = self.profile_text_color
if self.protected is not None:
data['protected'] = self.protected
if self.utc_offset:
data['utc_offset'] = self.utc_offset
if self.time_zone:
data['time_zone'] = self.time_zone
if self.url:
data['url'] = self.url
if self.status:
data['status'] = self.status.AsDict()
if self.friends_count:
data['friends_count'] = self.friends_count
if self.followers_count:
data['followers_count'] = self.followers_count
if self.statuses_count:
data['statuses_count'] = self.statuses_count
if self.favourites_count:
data['favourites_count'] = self.favourites_count
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.User instance
'''
if 'status' in data:
status = Status.NewFromJsonDict(data['status'])
else:
status = None
return User(id=data.get('id', None),
name=data.get('name', None),
screen_name=data.get('screen_name', None),
location=data.get('location', None),
description=data.get('description', None),
statuses_count=data.get('statuses_count', None),
followers_count=data.get('followers_count', None),
favourites_count=data.get('favourites_count', None),
friends_count=data.get('friends_count', None),
profile_image_url=data.get('profile_image_url', None),
profile_background_tile = data.get('profile_background_tile', None),
profile_background_image_url = data.get('profile_background_image_url', None),
profile_sidebar_fill_color = data.get('profile_sidebar_fill_color', None),
profile_background_color = data.get('profile_background_color', None),
profile_link_color = data.get('profile_link_color', None),
profile_text_color = data.get('profile_text_color', None),
protected = data.get('protected', None),
utc_offset = data.get('utc_offset', None),
time_zone = data.get('time_zone', None),
url=data.get('url', None),
status=status)
class DirectMessage(object):
'''A class representing the DirectMessage structure used by the twitter API.
The DirectMessage structure exposes the following properties:
direct_message.id
direct_message.created_at
direct_message.created_at_in_seconds # read only
direct_message.sender_id
direct_message.sender_screen_name
direct_message.recipient_id
direct_message.recipient_screen_name
direct_message.text
'''
def __init__(self,
id=None,
created_at=None,
sender_id=None,
sender_screen_name=None,
recipient_id=None,
recipient_screen_name=None,
text=None):
'''An object to hold a Twitter direct message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
id: The unique id of this direct message
created_at: The time this direct message was posted
sender_id: The id of the twitter user that sent this message
sender_screen_name: The name of the twitter user that sent this message
recipient_id: The id of the twitter that received this message
recipient_screen_name: The name of the twitter that received this message
text: The text of this direct message
'''
self.id = id
self.created_at = created_at
self.sender_id = sender_id
self.sender_screen_name = sender_screen_name
self.recipient_id = recipient_id
self.recipient_screen_name = recipient_screen_name
self.text = text
def GetId(self):
'''Get the unique id of this direct message.
Returns:
The unique id of this direct message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this direct message.
Args:
id: The unique id of this direct message
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this direct message.')
def GetCreatedAt(self):
'''Get the time this direct message was posted.
Returns:
The time this direct message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this direct message was posted.
Args:
created_at: The time this direct message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The time this direct message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this direct message was posted, in seconds since the epoch.
Returns:
The time this direct message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc="The time this direct message was "
"posted, in seconds since the epoch")
def GetSenderId(self):
'''Get the unique sender id of this direct message.
Returns:
The unique sender id of this direct message
'''
return self._sender_id
def SetSenderId(self, sender_id):
'''Set the unique sender id of this direct message.
Args:
sender id: The unique sender id of this direct message
'''
self._sender_id = sender_id
sender_id = property(GetSenderId, SetSenderId,
doc='The unique sender id of this direct message.')
def GetSenderScreenName(self):
'''Get the unique sender screen name of this direct message.
Returns:
The unique sender screen name of this direct message
'''
return self._sender_screen_name
def SetSenderScreenName(self, sender_screen_name):
'''Set the unique sender screen name of this direct message.
Args:
sender_screen_name: The unique sender screen name of this direct message
'''
self._sender_screen_name = sender_screen_name
sender_screen_name = property(GetSenderScreenName, SetSenderScreenName,
doc='The unique sender screen name of this direct message.')
def GetRecipientId(self):
'''Get the unique recipient id of this direct message.
Returns:
The unique recipient id of this direct message
'''
return self._recipient_id
def SetRecipientId(self, recipient_id):
'''Set the unique recipient id of this direct message.
Args:
recipient id: The unique recipient id of this direct message
'''
self._recipient_id = recipient_id
recipient_id = property(GetRecipientId, SetRecipientId,
doc='The unique recipient id of this direct message.')
def GetRecipientScreenName(self):
'''Get the unique recipient screen name of this direct message.
Returns:
The unique recipient screen name of this direct message
'''
return self._recipient_screen_name
def SetRecipientScreenName(self, recipient_screen_name):
'''Set the unique recipient screen name of this direct message.
Args:
recipient_screen_name: The unique recipient screen name of this direct message
'''
self._recipient_screen_name = recipient_screen_name
recipient_screen_name = property(GetRecipientScreenName, SetRecipientScreenName,
doc='The unique recipient screen name of this direct message.')
def GetText(self):
'''Get the text of this direct message.
Returns:
The text of this direct message.
'''
return self._text
def SetText(self, text):
'''Set the text of this direct message.
Args:
text: The text of this direct message
'''
self._text = text
text = property(GetText, SetText,
doc='The text of this direct message')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.created_at == other.created_at and \
self.sender_id == other.sender_id and \
self.sender_screen_name == other.sender_screen_name and \
self.recipient_id == other.recipient_id and \
self.recipient_screen_name == other.recipient_screen_name and \
self.text == other.text
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.DirectMessage instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.DirectMessage instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.DirectMessage instance.
Returns:
A JSON string representation of this twitter.DirectMessage instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.DirectMessage instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.DirectMessage instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.created_at:
data['created_at'] = self.created_at
if self.sender_id:
data['sender_id'] = self.sender_id
if self.sender_screen_name:
data['sender_screen_name'] = self.sender_screen_name
if self.recipient_id:
data['recipient_id'] = self.recipient_id
if self.recipient_screen_name:
data['recipient_screen_name'] = self.recipient_screen_name
if self.text:
data['text'] = self.text
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.DirectMessage instance
'''
return DirectMessage(created_at=data.get('created_at', None),
recipient_id=data.get('recipient_id', None),
sender_id=data.get('sender_id', None),
text=data.get('text', None),
sender_screen_name=data.get('sender_screen_name', None),
id=data.get('id', None),
recipient_screen_name=data.get('recipient_screen_name', None))
class Api(object):
'''A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch the most recently posted public twitter status messages:
>>> statuses = api.GetPublicTimeline()
>>> print [s.user.name for s in statuses]
[u'DeWitt', u'Kesuke Miyagi', u'ev', u'Buzz Andersen', u'Biz Stone'] #...
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print [s.text for s in statuses]
To use authentication, instantiate the twitter.Api class with a
username and password:
>>> api = twitter.Api(username='twitter user', password='twitter pass')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print [u.name for u in users]
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print status.text
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetStatus(id)
>>> api.DestroyStatus(id)
>>> api.GetFriendsTimeline(user)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.GetUserByEmail(email)
'''
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
username=None,
password=None,
input_encoding=None,
request_headers=None,
cache=DEFAULT_CACHE):
'''Instantiate a new twitter.Api object.
Args:
username: The username of the twitter account. [optional]
password: The password for the twitter account. [optional]
input_encoding: The encoding used to encode input strings. [optional]
request_header: A dictionary of additional HTTP request headers. [optional]
cache:
The cache instance to use. Defaults to DEFAULT_CACHE. Use
None to disable caching. [optional]
'''
self.SetCache(cache)
self._urllib = urllib2
self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
self._InitializeRequestHeaders(request_headers)
self._InitializeUserAgent()
self._InitializeDefaultParameters()
self._input_encoding = input_encoding
self.SetCredentials(username, password)
def GetPublicTimeline(self, since_id=None):
'''Fetch the sequnce of public twitter.Status message for all users.
Args:
since_id:
Returns only public statuses with an ID greater than (that is,
more recent than) the specified ID. [Optional]
Returns:
An sequence of twitter.Status instances, one for each message
'''
parameters = {}
if since_id:
parameters['since_id'] = since_id
url = 'http://twitter.com/statuses/public_timeline.json'
json = self._FetchUrl(url, parameters=parameters)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [Status.NewFromJsonDict(x) for x in data]
def GetFriendsTimeline(self,
user=None,
count=None,
since=None,
since_id=None):
'''Fetch the sequence of twitter.Status messages for a user's friends
The twitter.Api instance must be authenticated if the user is private.
Args:
user:
Specifies the ID or screen name of the user for whom to return
the friends_timeline. If unspecified, the username and password
must be set in the twitter.Api instance. [Optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. [Optional]
since:
Narrows the returned results to just those statuses created
after the specified HTTP-formatted date. [Optional]
since_id:
Returns only public statuses with an ID greater than (that is,
more recent than) the specified ID. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
'''
if not user and not self._username:
raise TwitterError("User must be specified if API is not authenticated.")
if user:
url = 'http://twitter.com/statuses/friends_timeline/%s.json' % user
else:
url = 'http://twitter.com/statuses/friends_timeline.json'
parameters = {}
if count is not None:
try:
if int(count) > 200:
raise TwitterError("'count' may not be greater than 200")
except ValueError:
raise TwitterError("'count' must be an integer")
parameters['count'] = count
if since:
parameters['since'] = since
if since_id:
parameters['since_id'] = since_id
json = self._FetchUrl(url, parameters=parameters)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [Status.NewFromJsonDict(x) for x in data]
def GetUserTimeline(self,
id=None,
user_id=None,
screen_name=None,
since_id=None,
max_id=None,
count=None,
page=None):
'''Fetch the sequence of public Status messages for a single user.
The twitter.Api instance must be authenticated if the user is private.
Args:
id:
Specifies the ID or screen name of the user for whom to return
the user_timeline. [optional]
user_id:
Specfies the ID of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name. [optional]
screen_name:
Specfies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID. [optional]
since_id:
Returns only public statuses with an ID greater than (that is,
more recent than) the specified ID. [optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. [optional]
page:
Specifies the page of results to retrieve. Note: there are
pagination limits. [optional]
Returns:
A sequence of Status instances, one for each message up to count
'''
parameters = {}
if id:
url = 'http://twitter.com/statuses/user_timeline/%s.json' % id
elif user_id:
url = 'http://twitter.com/statuses/user_timeline.json?user_id=%d' % user_id
elif screen_name:
url = ('http://twitter.com/statuses/user_timeline.json?screen_name=%s' %
screen_name)
elif not self._username:
raise TwitterError("User must be specified if API is not authenticated.")
else:
url = 'http://twitter.com/statuses/user_timeline.json'
if since_id:
try:
parameters['since_id'] = long(since_id)
except:
raise TwitterError("since_id must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if page:
try:
parameters['page'] = int(page)
except:
raise TwitterError("page must be an integer")
json = self._FetchUrl(url, parameters=parameters)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [Status.NewFromJsonDict(x) for x in data]
def GetStatus(self, id):
'''Returns a single status message.
The twitter.Api instance must be authenticated if the status message is private.
Args:
id: The numerical ID of the status you're trying to retrieve.
Returns:
A twitter.Status instance representing that status message
'''
try:
if id:
long(id)
except:
raise TwitterError("id must be an long integer")
url = 'http://twitter.com/statuses/show/%s.json' % id
json = self._FetchUrl(url)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return Status.NewFromJsonDict(data)
def DestroyStatus(self, id):
'''Destroys the status specified by the required ID parameter.
The twitter.Api instance must be authenticated and thee
authenticating user must be the author of the specified status.
Args:
id: The numerical ID of the status you're trying to destroy.
Returns:
A twitter.Status instance representing the destroyed status message
'''
try:
if id:
long(id)
except:
raise TwitterError("id must be an integer")
url = 'http://twitter.com/statuses/destroy/%s.json' % id
json = self._FetchUrl(url, post_data={})
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return Status.NewFromJsonDict(data)
def PostUpdate(self, status, in_reply_to_status_id=None):
'''Post a twitter status message from the authenticated user.
The twitter.Api instance must be authenticated.
Args:
status:
The message text to be posted. Must be less than or equal to
140 characters.
in_reply_to_status_id:
The ID of an existing status that the status to be posted is
in reply to. This implicitly sets the in_reply_to_user_id
attribute of the resulting status to the user ID of the
message being replied to. Invalid/missing status IDs will be
ignored. [Optional]
Returns:
A twitter.Status instance representing the message posted.
'''
if not self._username:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = 'http://twitter.com/statuses/update.json'
if len(status) > CHARACTER_LIMIT:
raise TwitterError("Text must be less than or equal to %d characters. "
"Consider using PostUpdates." % CHARACTER_LIMIT)
data = {'status': status}
if in_reply_to_status_id:
data['in_reply_to_status_id'] = in_reply_to_status_id
json = self._FetchUrl(url, post_data=data)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return Status.NewFromJsonDict(data)
def PostUpdates(self, status, continuation=None, **kwargs):
'''Post one or more twitter status messages from the authenticated user.
Unlike api.PostUpdate, this method will post multiple status updates
if the message is longer than 140 characters.
The twitter.Api instance must be authenticated.
Args:
status:
The message text to be posted. May be longer than 140 characters.
continuation:
The character string, if any, to be appended to all but the
last message. Note that Twitter strips trailing '...' strings
from messages. Consider using the unicode \u2026 character
(horizontal ellipsis) instead. [Defaults to None]
**kwargs:
See api.PostUpdate for a list of accepted parameters.
Returns:
A of list twitter.Status instance representing the messages posted.
'''
results = list()
if continuation is None:
continuation = ''
line_length = CHARACTER_LIMIT - len(continuation)
lines = textwrap.wrap(status, line_length)
for line in lines[0:-1]:
results.append(self.PostUpdate(line + continuation, **kwargs))
results.append(self.PostUpdate(lines[-1], **kwargs))
return results
def GetReplies(self, since=None, since_id=None, page=None):
'''Get a sequence of status messages representing the 20 most recent
replies (status updates prefixed with @username) to the authenticating
user.
Args:
page:
since:
Narrows the returned results to just those statuses created
after the specified HTTP-formatted date. [optional]
since_id:
Returns only public statuses with an ID greater than (that is,
more recent than) the specified ID. [Optional]
Returns:
A sequence of twitter.Status instances, one for each reply to the user.
'''
url = 'http://twitter.com/statuses/replies.json'
if not self._username:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since:
parameters['since'] = since
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
json = self._FetchUrl(url, parameters=parameters)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [Status.NewFromJsonDict(x) for x in data]
def GetFriends(self, user=None, page=None):
'''Fetch the sequence of twitter.User instances, one for each friend.
Args:
user: the username or id of the user whose friends you are fetching. If
not specified, defaults to the authenticated user. [optional]
The twitter.Api instance must be authenticated.
Returns:
A sequence of twitter.User instances, one for each friend
'''
if not user and not self._username:
raise TwitterError("twitter.Api instance must be authenticated")
if user:
url = 'http://twitter.com/statuses/friends/%s.json' % user
else:
url = 'http://twitter.com/statuses/friends.json'
parameters = {}
if page:
parameters['page'] = page
json = self._FetchUrl(url, parameters=parameters)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [User.NewFromJsonDict(x) for x in data]
def GetFollowers(self, page=None):
'''Fetch the sequence of twitter.User instances, one for each follower
The twitter.Api instance must be authenticated.
Returns:
A sequence of twitter.User instances, one for each follower
'''
if not self._username:
raise TwitterError("twitter.Api instance must be authenticated")
url = 'http://twitter.com/statuses/followers.json'
parameters = {}
if page:
parameters['page'] = page
json = self._FetchUrl(url, parameters=parameters)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [User.NewFromJsonDict(x) for x in data]
def GetFeatured(self):
'''Fetch the sequence of twitter.User instances featured on twitter.com
The twitter.Api instance must be authenticated.
Returns:
A sequence of twitter.User instances
'''
url = 'http://twitter.com/statuses/featured.json'
json = self._FetchUrl(url)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [User.NewFromJsonDict(x) for x in data]
def GetUser(self, user):
'''Returns a single user.
The twitter.Api instance must be authenticated.
Args:
user: The username or id of the user to retrieve.
Returns:
A twitter.User instance representing that user
'''
url = 'http://twitter.com/users/show/%s.json' % user
json = self._FetchUrl(url)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return User.NewFromJsonDict(data)
def GetDirectMessages(self, since=None, since_id=None, page=None):
'''Returns a list of the direct messages sent to the authenticating user.
The twitter.Api instance must be authenticated.
Args:
since:
Narrows the returned results to just those statuses created
after the specified HTTP-formatted date. [optional]
since_id:
Returns only public statuses with an ID greater than (that is,
more recent than) the specified ID. [Optional]
Returns:
A sequence of twitter.DirectMessage instances
'''
url = 'http://twitter.com/direct_messages.json'
if not self._username:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since:
parameters['since'] = since
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
json = self._FetchUrl(url, parameters=parameters)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return [DirectMessage.NewFromJsonDict(x) for x in data]
def PostDirectMessage(self, user, text):
'''Post a twitter direct message from the authenticated user
The twitter.Api instance must be authenticated.
Args:
user: The ID or screen name of the recipient user.
text: The message text to be posted. Must be less than 140 characters.
Returns:
A twitter.DirectMessage instance representing the message posted
'''
if not self._username:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = 'http://twitter.com/direct_messages/new.json'
data = {'text': text, 'user': user}
json = self._FetchUrl(url, post_data=data)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return DirectMessage.NewFromJsonDict(data)
def DestroyDirectMessage(self, id):
'''Destroys the direct message specified in the required ID parameter.
The twitter.Api instance must be authenticated, and the
authenticating user must be the recipient of the specified direct
message.
Args:
id: The id of the direct message to be destroyed
Returns:
A twitter.DirectMessage instance representing the message destroyed
'''
url = 'http://twitter.com/direct_messages/destroy/%s.json' % id
json = self._FetchUrl(url, post_data={})
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return DirectMessage.NewFromJsonDict(data)
def CreateFriendship(self, user):
'''Befriends the user specified in the user parameter as the authenticating user.
The twitter.Api instance must be authenticated.
Args:
The ID or screen name of the user to befriend.
Returns:
A twitter.User instance representing the befriended user.
'''
url = 'http://twitter.com/friendships/create/%s.json' % user
json = self._FetchUrl(url, post_data={})
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return User.NewFromJsonDict(data)
def DestroyFriendship(self, user):
'''Discontinues friendship with the user specified in the user parameter.
The twitter.Api instance must be authenticated.
Args:
The ID or screen name of the user with whom to discontinue friendship.
Returns:
A twitter.User instance representing the discontinued friend.
'''
url = 'http://twitter.com/friendships/destroy/%s.json' % user
json = self._FetchUrl(url, post_data={})
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return User.NewFromJsonDict(data)
def CreateFavorite(self, status):
'''Favorites the status specified in the status parameter as the authenticating user.
Returns the favorite status when successful.
The twitter.Api instance must be authenticated.
Args:
The twitter.Status instance to mark as a favorite.
Returns:
A twitter.Status instance representing the newly-marked favorite.
'''
url = 'http://twitter.com/favorites/create/%s.json' % status.id
json = self._FetchUrl(url, post_data={})
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return Status.NewFromJsonDict(data)
def DestroyFavorite(self, status):
'''Un-favorites the status specified in the ID parameter as the authenticating user.
Returns the un-favorited status in the requested format when successful.
The twitter.Api instance must be authenticated.
Args:
The twitter.Status to unmark as a favorite.
Returns:
A twitter.Status instance representing the newly-unmarked favorite.
'''
url = 'http://twitter.com/favorites/destroy/%s.json' % status.id
json = self._FetchUrl(url, post_data={})
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return Status.NewFromJsonDict(data)
def GetUserByEmail(self, email):
'''Returns a single user by email address.
Args:
email: The email of the user to retrieve.
Returns:
A twitter.User instance representing that user
'''
url = 'http://twitter.com/users/show.json?email=%s' % email
json = self._FetchUrl(url)
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return User.NewFromJsonDict(data)
def VerifyCredentials(self):
'''Returns a twitter.User instance if the authenticating user is valid.
Returns:
A twitter.User instance representing that user if the
credentials are valid, None otherwise.
'''
if not self._username:
raise TwitterError("Api instance must first be given user credentials.")
url = 'http://twitter.com/account/verify_credentials.json'
try:
json = self._FetchUrl(url, no_cache=True)
except urllib2.HTTPError, http_error:
if http_error.code == httplib.UNAUTHORIZED:
return None
else:
raise http_error
data = simplejson.loads(json)
self._CheckForTwitterError(data)
return User.NewFromJsonDict(data)
def SetCredentials(self, username, password):
'''Set the username and password for this instance
Args:
username: The twitter username.
password: The twitter password.
'''
self._username = username
self._password = password
def ClearCredentials(self):
'''Clear the username and password for this instance
'''
self._username = None
self._password = None
def SetCache(self, cache):
'''Override the default cache. Set to None to prevent caching.
Args:
cache: an instance that supports the same API as the twitter._FileCache
'''
if cache == DEFAULT_CACHE:
self._cache = _FileCache()
else:
self._cache = cache
def SetUrllib(self, urllib):
'''Override the default urllib implementation.
Args:
urllib: an instance that supports the same API as the urllib2 module
'''
self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
'''Override the default cache timeout.
Args:
cache_timeout: time, in seconds, that responses should be reused.
'''
self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
'''Override the default user agent
Args:
user_agent: a string that should be send to the server as the User-agent
'''
self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
'''Set the X-Twitter HTTP headers that will be sent to the server.
Args:
client:
The client name as a string. Will be sent to the server as
the 'X-Twitter-Client' header.
url:
The URL of the meta.xml as a string. Will be sent to the server
as the 'X-Twitter-Client-URL' header.
version:
The client version as a string. Will be sent to the server
as the 'X-Twitter-Client-Version' header.
'''
self._request_headers['X-Twitter-Client'] = client
self._request_headers['X-Twitter-Client-URL'] = url
self._request_headers['X-Twitter-Client-Version'] = version
def SetSource(self, source):
'''Suggest the "from source" value to be displayed on the Twitter web site.
The value of the 'source' parameter must be first recognized by
the Twitter server. New source values are authorized on a case by
case basis by the Twitter development team.
Args:
source:
The source name as a string. Will be sent to the server as
the 'source' parameter.
'''
self._default_params['source'] = source
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into consituent parts
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-twitter/%s)' % \
(self._urllib.__version__, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
def _AddAuthorizationHeader(self, username, password):
if username and password:
basic_auth = base64.encodestring('%s:%s' % (username, password))[:-1]
self._request_headers['Authorization'] = 'Basic %s' % basic_auth
def _RemoveAuthorizationHeader(self):
if self._request_headers and 'Authorization' in self._request_headers:
del self._request_headers['Authorization']
def _GetOpener(self, url, username=None, password=None):
if username and password:
self._AddAuthorizationHeader(username, password)
handler = self._urllib.HTTPBasicAuthHandler()
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
handler.add_password(Api._API_REALM, netloc, username, password)
opener = self._urllib.build_opener(handler)
else:
opener = self._urllib.build_opener()
opener.addheaders = self._request_headers.items()
return opener
def _Encode(self, s):
if self._input_encoding:
return unicode(s, self._input_encoding).encode('utf-8')
else:
return unicode(s).encode('utf-8')
def _EncodeParameters(self, parameters):
'''Return a string in key=value&key=value form
Values of None are not included in the output string.
Args:
parameters:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if parameters is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in parameters.items() if v is not None]))
def _EncodePostData(self, post_data):
'''Return a string in key=value&key=value form
Values are assumed to be encoded in the format specified by self._encoding,
and are subsequently URL encoded.
Args:
post_data:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if post_data is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in post_data.items()]))
def _CheckForTwitterError(self, data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data: A python dict created from the Twitter json response
Raises:
TwitterError wrapping the twitter error message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
def _FetchUrl(self,
url,
post_data=None,
parameters=None,
no_cache=None):
'''Fetch a URL, optionally caching for a specified time.
Args:
url: The URL to retrieve
post_data:
A dict of (str, unicode) key/value pairs. If set, POST will be used.
parameters:
A dict whose key/value pairs should encoded and added
to the query string. [OPTIONAL]
no_cache: If true, overrides the cache on the current request
Returns:
A string containing the body of the response.
'''
# Build the extra parameters dict
extra_params = {}
if self._default_params:
extra_params.update(self._default_params)
if parameters:
extra_params.update(parameters)
# Add key/value parameters to the query string of the url
url = self._BuildUrl(url, extra_params=extra_params)
# Get a url opener that can handle basic auth
opener = self._GetOpener(url, username=self._username, password=self._password)
encoded_post_data = self._EncodePostData(post_data)
# Open and return the URL immediately if we're not going to cache
if encoded_post_data or no_cache or not self._cache or not self._cache_timeout:
url_data = opener.open(url, encoded_post_data).read()
opener.close()
else:
# Unique keys are a combination of the url and the username
if self._username:
key = self._username + ':' + url
else:
key = url
# See if it has been cached before
last_cached = self._cache.GetCachedTime(key)
# If the cached version is outdated then fetch another and store it
if not last_cached or time.time() >= last_cached + self._cache_timeout:
url_data = opener.open(url, encoded_post_data).read()
opener.close()
self._cache.Set(key, url_data)
else:
url_data = self._cache.Get(key)
# Always return the latest version
return url_data
class _FileCacheError(Exception):
'''Base exception class for FileCache related errors'''
class _FileCache(object):
DEPTH = 3
def __init__(self,root_directory=None):
self._InitializeRootDirectory(root_directory)
def Get(self,key):
path = self._GetPath(key)
if os.path.exists(path):
return open(path).read()
else:
return None
def Set(self,key,data):
path = self._GetPath(key)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise _FileCacheError('%s exists but is not a directory' % directory)
temp_fd, temp_path = tempfile.mkstemp()
temp_fp = os.fdopen(temp_fd, 'w')
temp_fp.write(data)
temp_fp.close()
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
os.rename(temp_path, path)
def Remove(self,key):
path = self._GetPath(key)
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory ))
if os.path.exists(path):
os.remove(path)
def GetCachedTime(self,key):
path = self._GetPath(key)
if os.path.exists(path):
return os.path.getmtime(path)
else:
return None
def _GetUsername(self):
'''Attempt to find the username in a cross-platform fashion.'''
try:
return os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
except (IOError, OSError), e:
return 'nobody'
def _GetTmpCachePath(self):
username = self._GetUsername()
cache_directory = 'python.cache_' + username
return os.path.join(tempfile.gettempdir(), cache_directory)
def _InitializeRootDirectory(self, root_directory):
if not root_directory:
root_directory = self._GetTmpCachePath()
root_directory = os.path.abspath(root_directory)
if not os.path.exists(root_directory):
os.mkdir(root_directory)
if not os.path.isdir(root_directory):
raise _FileCacheError('%s exists but is not a directory' %
root_directory)
self._root_directory = root_directory
def _GetPath(self,key):
try:
hashed_key = md5(key).hexdigest()
except TypeError:
hashed_key = md5.new(key).hexdigest()
return os.path.join(self._root_directory,
self._GetPrefix(hashed_key),
hashed_key)
def _GetPrefix(self,hashed_key):
return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
|
apache-2.0
|
dkarakats/edx-platform
|
common/test/acceptance/pages/studio/component_editor.py
|
65
|
5068
|
from bok_choy.page_object import PageObject
from selenium.webdriver.common.keys import Keys
from utils import click_css
from selenium.webdriver.support.ui import Select
class BaseComponentEditorView(PageObject):
"""
A base :class:`.PageObject` for the component and visibility editors.
This class assumes that the editor is our default editor as displayed for xmodules.
"""
BODY_SELECTOR = '.xblock-editor'
def __init__(self, browser, locator):
"""
Args:
browser (selenium.webdriver): The Selenium-controlled browser that this page is loaded in.
locator (str): The locator that identifies which xblock this :class:`.xblock-editor` relates to.
"""
super(BaseComponentEditorView, self).__init__(browser)
self.locator = locator
def is_browser_on_page(self):
return self.q(css='{}[data-locator="{}"]'.format(self.BODY_SELECTOR, self.locator)).present
def _bounded_selector(self, selector):
"""
Return `selector`, but limited to this particular `ComponentEditorView` context
"""
return '{}[data-locator="{}"] {}'.format(
self.BODY_SELECTOR,
self.locator,
selector
)
def url(self):
"""
Returns None because this is not directly accessible via URL.
"""
return None
def save(self):
"""
Clicks save button.
"""
click_css(self, 'a.action-save')
def cancel(self):
"""
Clicks cancel button.
"""
click_css(self, 'a.action-cancel', require_notification=False)
class ComponentEditorView(BaseComponentEditorView):
"""
A :class:`.PageObject` representing the rendered view of a component editor.
"""
def get_setting_element(self, label):
"""
Returns the index of the setting entry with given label (display name) within the Settings modal.
"""
settings_button = self.q(css='.edit-xblock-modal .editor-modes .settings-button')
if settings_button.is_present():
settings_button.click()
setting_labels = self.q(css=self._bounded_selector('.metadata_edit .wrapper-comp-setting .setting-label'))
for index, setting in enumerate(setting_labels):
if setting.text == label:
return self.q(css=self._bounded_selector('.metadata_edit div.wrapper-comp-setting .setting-input'))[index]
return None
def set_field_value_and_save(self, label, value):
"""
Sets the text field with given label (display name) to the specified value, and presses Save.
"""
elem = self.get_setting_element(label)
# Clear the current value, set the new one, then
# Tab to move to the next field (so change event is triggered).
elem.clear()
elem.send_keys(value)
elem.send_keys(Keys.TAB)
self.save()
def set_select_value_and_save(self, label, value):
"""
Sets the select with given label (display name) to the specified value, and presses Save.
"""
elem = self.get_setting_element(label)
select = Select(elem)
select.select_by_value(value)
self.save()
def get_selected_option_text(self, label):
"""
Returns the text of the first selected option for the select with given label (display name).
"""
elem = self.get_setting_element(label)
if elem:
select = Select(elem)
return select.first_selected_option.text
else:
return None
class ComponentVisibilityEditorView(BaseComponentEditorView):
"""
A :class:`.PageObject` representing the rendered view of a component visibility editor.
"""
OPTION_SELECTOR = '.modal-section-content li.field'
@property
def all_options(self):
"""
Return all visibility 'li' options.
"""
return self.q(css=self._bounded_selector(self.OPTION_SELECTOR)).results
@property
def selected_options(self):
"""
Return all selected visibility 'li' options.
"""
results = []
for option in self.all_options:
button = option.find_element_by_css_selector('input.input')
if button.is_selected():
results.append(option)
return results
def select_option(self, label_text, save=True):
"""
Click the first li which has a label matching `label_text`.
Arguments:
label_text (str): Text of a label accompanying the input
which should be clicked.
save (boolean): Whether the "save" button should be clicked
afterwards.
Returns:
bool: Whether the label was found and clicked.
"""
for option in self.all_options:
if label_text in option.text:
option.click()
if save:
self.save()
return True
return False
|
agpl-3.0
|
cappatar/knesset-data-pipelines
|
datapackage_pipelines_knesset/members/processors/load_members.py
|
1
|
1419
|
from datapackage_pipelines_knesset.common.base_processors.add_resource import AddResourceBaseProcessor
# only loads members with the following positionId:
SUPPORTED_POSITION_IDS = [43, 61]
class Processor(AddResourceBaseProcessor):
def _get_schema(self, resource_descriptor):
return resource_descriptor.get("schema", {
"fields": [
{"name": "url", "type": "string", "description": "url to download protocol from"},
{
"name": "kns_person_id", "type": "integer",
"description": "primary key from kns_person table"}
],
"primaryKey": ["kns_person_id"]
})
def _get_new_resource(self):
person_table = self.db_meta.tables.get("kns_person")
persontoposition_table = self.db_meta.tables.get("kns_persontoposition")
if person_table is None or persontoposition_table is None:
raise Exception("processor requires kns person tables to exist")
for db_row in self.db_session\
.query(person_table, persontoposition_table)\
.filter(persontoposition_table.p.PersonID==person_table.p.PersonID)\
.filter(persontoposition_table.p.PositionID.in_(SUPPORTED_POSITION_IDS))\
.all():
row = db_row._asdict()
yield {"kns_person_id": row["PersonID"]}
if __name__ == "__main__":
Processor.main()
|
mit
|
trafi/gyp
|
test/mac/gyptest-loadable-module.py
|
54
|
1280
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests that a loadable_module target is built correctly.
"""
import TestGyp
import os
import struct
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'loadable-module'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
# Binary.
binary = test.built_file_path(
'test_loadable_module.plugin/Contents/MacOS/test_loadable_module',
chdir=CHDIR)
test.must_exist(binary)
MH_BUNDLE = 8
if struct.unpack('4I', open(binary, 'rb').read(16))[3] != MH_BUNDLE:
test.fail_test()
# Info.plist.
info_plist = test.built_file_path(
'test_loadable_module.plugin/Contents/Info.plist', chdir=CHDIR)
test.must_exist(info_plist)
test.must_contain(info_plist, """
<key>CFBundleExecutable</key>
<string>test_loadable_module</string>
""")
# PkgInfo.
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/PkgInfo', chdir=CHDIR)
test.built_file_must_not_exist(
'test_loadable_module.plugin/Contents/Resources', chdir=CHDIR)
test.pass_test()
|
bsd-3-clause
|
bertucho/epic-movie-quotes-quiz
|
dialogos/build/Twisted/twisted/logger/test/test_io.py
|
11
|
7231
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{twisted.logger._io}.
"""
from __future__ import print_function
import sys
from twisted.trial import unittest
from .._levels import LogLevel
from .._logger import Logger
from .._observer import LogPublisher
from .._io import LoggingFile
class LoggingFileTests(unittest.TestCase):
"""
Tests for L{LoggingFile}.
"""
def setUp(self):
"""
Create a logger for test L{LoggingFile} instances to use.
"""
self.publisher = LogPublisher()
self.logger = Logger(observer=self.publisher)
def test_softspace(self):
"""
L{LoggingFile.softspace} is 0.
"""
self.assertEqual(LoggingFile.softspace, 0)
def test_readOnlyAttributes(self):
"""
Some L{LoggingFile} attributes are read-only.
"""
f = LoggingFile(self.logger)
self.assertRaises(AttributeError, setattr, f, "closed", True)
self.assertRaises(AttributeError, setattr, f, "encoding", "utf-8")
self.assertRaises(AttributeError, setattr, f, "mode", "r")
self.assertRaises(AttributeError, setattr, f, "newlines", ["\n"])
self.assertRaises(AttributeError, setattr, f, "name", "foo")
def test_unsupportedMethods(self):
"""
Some L{LoggingFile} methods are unsupported.
"""
f = LoggingFile(self.logger)
self.assertRaises(IOError, f.read)
self.assertRaises(IOError, f.next)
self.assertRaises(IOError, f.readline)
self.assertRaises(IOError, f.readlines)
self.assertRaises(IOError, f.xreadlines)
self.assertRaises(IOError, f.seek)
self.assertRaises(IOError, f.tell)
self.assertRaises(IOError, f.truncate)
def test_level(self):
"""
Default level is L{LogLevel.info} if not set.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.level, LogLevel.info)
f = LoggingFile(self.logger, level=LogLevel.error)
self.assertEqual(f.level, LogLevel.error)
def test_encoding(self):
"""
Default encoding is C{sys.getdefaultencoding()} if not set.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.encoding, sys.getdefaultencoding())
f = LoggingFile(self.logger, encoding="utf-8")
self.assertEqual(f.encoding, "utf-8")
def test_mode(self):
"""
Reported mode is C{"w"}.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.mode, "w")
def test_newlines(self):
"""
The C{newlines} attribute is C{None}.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.newlines, None)
def test_name(self):
"""
The C{name} attribute is fixed.
"""
f = LoggingFile(self.logger)
self.assertEqual(
f.name,
"<LoggingFile twisted.logger.test.test_io#info>"
)
def test_close(self):
"""
L{LoggingFile.close} closes the file.
"""
f = LoggingFile(self.logger)
f.close()
self.assertEqual(f.closed, True)
self.assertRaises(ValueError, f.write, "Hello")
def test_flush(self):
"""
L{LoggingFile.flush} does nothing.
"""
f = LoggingFile(self.logger)
f.flush()
def test_fileno(self):
"""
L{LoggingFile.fileno} returns C{-1}.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.fileno(), -1)
def test_isatty(self):
"""
L{LoggingFile.isatty} returns C{False}.
"""
f = LoggingFile(self.logger)
self.assertEqual(f.isatty(), False)
def test_writeBuffering(self):
"""
Writing buffers correctly.
"""
f = self.observedFile()
f.write("Hello")
self.assertEqual(f.messages, [])
f.write(", world!\n")
self.assertEqual(f.messages, [u"Hello, world!"])
f.write("It's nice to meet you.\n\nIndeed.")
self.assertEqual(
f.messages,
[
u"Hello, world!",
u"It's nice to meet you.",
u"",
]
)
def test_writeBytesDecoded(self):
"""
Bytes are decoded to unicode.
"""
f = self.observedFile(encoding="utf-8")
f.write(b"Hello, Mr. S\xc3\xa1nchez\n")
self.assertEqual(f.messages, [u"Hello, Mr. S\xe1nchez"])
def test_writeUnicode(self):
"""
Unicode is unmodified.
"""
f = self.observedFile(encoding="utf-8")
f.write(u"Hello, Mr. S\xe1nchez\n")
self.assertEqual(f.messages, [u"Hello, Mr. S\xe1nchez"])
def test_writeLevel(self):
"""
Log level is emitted properly.
"""
f = self.observedFile()
f.write("Hello\n")
self.assertEqual(len(f.events), 1)
self.assertEqual(f.events[0]["log_level"], LogLevel.info)
f = self.observedFile(level=LogLevel.error)
f.write("Hello\n")
self.assertEqual(len(f.events), 1)
self.assertEqual(f.events[0]["log_level"], LogLevel.error)
def test_writeFormat(self):
"""
Log format is C{u"{message}"}.
"""
f = self.observedFile()
f.write("Hello\n")
self.assertEqual(len(f.events), 1)
self.assertEqual(f.events[0]["log_format"], u"{log_io}")
def test_writelinesBuffering(self):
"""
C{writelines} does not add newlines.
"""
# Note this is different behavior than t.p.log.StdioOnnaStick.
f = self.observedFile()
f.writelines(("Hello", ", ", ""))
self.assertEqual(f.messages, [])
f.writelines(("world!\n",))
self.assertEqual(f.messages, [u"Hello, world!"])
f.writelines(("It's nice to meet you.\n\n", "Indeed."))
self.assertEqual(
f.messages,
[
u"Hello, world!",
u"It's nice to meet you.",
u"",
]
)
def test_print(self):
"""
L{LoggingFile} can replace L{sys.stdout}.
"""
f = self.observedFile()
self.patch(sys, "stdout", f)
print("Hello,", end=" ")
print("world.")
self.assertEqual(f.messages, [u"Hello, world."])
def observedFile(self, **kwargs):
"""
Construct a L{LoggingFile} with a built-in observer.
@param kwargs: keyword arguments for the L{LoggingFile}.
@type kwargs: L{dict}
@return: a L{LoggingFile} with an observer that appends received
events into the file's C{events} attribute (a L{list}) and
event messages into the file's C{messages} attribute (a L{list}).
@rtype: L{LoggingFile}
"""
def observer(event):
f.events.append(event)
if "log_io" in event:
f.messages.append(event["log_io"])
log = Logger(observer=observer)
f = LoggingFile(logger=log, **kwargs)
f.events = []
f.messages = []
return f
|
mit
|
Midrya/chromium
|
third_party/gsutil/gslib/commands/getlogging.py
|
51
|
5500
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.command import Command
from gslib.command import COMMAND_NAME
from gslib.command import COMMAND_NAME_ALIASES
from gslib.command import CONFIG_REQUIRED
from gslib.command import FILE_URIS_OK
from gslib.command import MAX_ARGS
from gslib.command import MIN_ARGS
from gslib.command import PROVIDER_URIS_OK
from gslib.command import SUPPORTED_SUB_ARGS
from gslib.command import URIS_START_ARG
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
_detailed_help_text = ("""
<B>SYNOPSIS</B>
gsutil getlogging uri
<B>DESCRIPTION</B>
If logging is enabled for the specified bucket uri, the server responds
with a <Logging> XML element that looks something like this:
<?xml version="1.0" ?>
<Logging>
<LogBucket>
logs-bucket
</LogBucket>
<LogObjectPrefix>
my-logs-enabled-bucket
</LogObjectPrefix>
</Logging>
If logging is not enabled, an empty <Logging> element is returned.
You can download log data from your log bucket using the gsutil cp command.
<B>ACCESS LOG FIELDS</B>
Field Type Description
time_micros integer The time that the request was completed, in
microseconds since the Unix epoch.
c_ip string The IP address from which the request was made.
The "c" prefix indicates that this is information
about the client.
c_ip_type integer The type of IP in the c_ip field:
A value of 1 indicates an IPV4 address.
A value of 2 indicates an IPV6 address.
c_ip_region string Reserved for future use.
cs_method string The HTTP method of this request. The "cs" prefix
indicates that this information was sent from the
client to the server.
cs_uri string The URI of the request.
sc_status integer The HTTP status code the server sent in response.
The "sc" prefix indicates that this information
was sent from the server to the client.
cs_bytes integer The number of bytes sent in the request.
sc_bytes integer The number of bytes sent in the response.
time_taken_micros integer The time it took to serve the request in
microseconds.
cs_host string The host in the original request.
cs_referrer string The HTTP referrer for the request.
cs_user_agent string The User-Agent of the request.
s_request_id string The request identifier.
cs_operation string The Google Cloud Storage operation e.g.
GET_Object.
cs_bucket string The bucket specified in the request. If this is a
list buckets request, this can be null.
cs_object string The object specified in this request. This can be
null.
<B>STORAGE DATA FIELDS</B>
Field Type Description
bucket string The name of the bucket.
storage_byte_hours integer Average size in bytes/per hour of that bucket.
""")
class GetLoggingCommand(Command):
"""Implementation of gsutil getlogging command."""
# Command specification (processed by parent class).
command_spec = {
# Name of command.
COMMAND_NAME : 'getlogging',
# List of command name aliases.
COMMAND_NAME_ALIASES : [],
# Min number of args required by this command.
MIN_ARGS : 1,
# Max number of args required by this command, or NO_MAX.
MAX_ARGS : 1,
# Getopt-style string specifying acceptable sub args.
SUPPORTED_SUB_ARGS : '',
# True if file URIs acceptable for this command.
FILE_URIS_OK : False,
# True if provider-only URIs acceptable for this command.
PROVIDER_URIS_OK : False,
# Index in args of first URI arg.
URIS_START_ARG : 0,
# True if must configure gsutil before running command.
CONFIG_REQUIRED : True,
}
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : 'getlogging',
# List of help name aliases.
HELP_NAME_ALIASES : [],
# Type of help:
HELP_TYPE : HelpType.COMMAND_HELP,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : 'Get logging configuration for a bucket',
# The full help text.
HELP_TEXT : _detailed_help_text,
}
# Command entry point.
def RunCommand(self):
self.GetXmlSubresource('logging', self.args[0])
return 0
|
bsd-3-clause
|
surjit84/pmtk3
|
matlabTools/metaTools/googlecode_upload.py
|
304
|
8912
|
#!/usr/bin/env python
#
# Copyright 2006, 2007 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account. If you have already committed to your project's
# Subversion repository, the script will automatically retrieve your
# credentials from there (unless disabled, see the output of '--help'
# for details).
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
import sys
def upload(file, project_name, user_name, password, summary, labels=None):
"""Upload a file to a Google Code project's file server.
Args:
file: The local path to the file.
project_name: The name of your project on Google Code.
user_name: Your Google account name.
password: The googlecode.com password for your account.
Note that this is NOT your global Google Account password!
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
Returns: a tuple:
http_status: 201 if the upload succeeded, something else if an
error occured.
http_reason: The human-readable string associated with http_status
file_url: If the upload succeeded, the URL of the file on Google
Code, None otherwise.
"""
# The login is the user part of user@gmail.com. If the login provided
# is in the full user@domain form, strip it down.
if user_name.endswith('@gmail.com'):
user_name = user_name[:user_name.index('@gmail.com')]
form_fields = [('summary', summary)]
if labels is not None:
form_fields.extend([('label', l.strip()) for l in labels])
content_type, body = encode_upload_request(form_fields, file)
upload_host = '%s.googlecode.com' % project_name
upload_uri = '/files'
auth_token = base64.b64encode('%s:%s'% (user_name, password))
headers = {
'Authorization': 'Basic %s' % auth_token,
'User-Agent': 'Googlecode.com uploader v0.9.4',
'Content-Type': content_type,
}
server = httplib.HTTPSConnection(upload_host)
server.request('POST', upload_uri, body, headers)
resp = server.getresponse()
server.close()
if resp.status == 201:
location = resp.getheader('Location', None)
else:
location = None
return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
"""Encode the given fields and file into a multipart form body.
fields is a sequence of (name, value) pairs. file is the path of
the file to upload. The file will be uploaded to Google Code with
the same file name.
Returns: (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
CRLF = '\r\n'
body = []
# Add the metadata about the upload first
for key, value in fields:
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="%s"' % key,
'',
value,
])
# Now add the file itself
file_name = os.path.basename(file_path)
f = open(file_path, 'rb')
file_content = f.read()
f.close()
body.extend(
['--' + BOUNDARY,
'Content-Disposition: form-data; name="filename"; filename="%s"'
% file_name,
# The upload server determines the mime-type, no need to set it.
'Content-Type: application/octet-stream',
'',
file_content,
])
# Finalize the form body
body.extend(['--' + BOUNDARY + '--', ''])
return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(body)
def upload_find_auth(file_path, project_name, summary, labels=None,
user_name=None, password=None, tries=3):
"""Find credentials and upload a file to a Google Code project's file server.
file_path, project_name, summary, and labels are passed as-is to upload.
Args:
file_path: The local path to the file.
project_name: The name of your project on Google Code.
summary: A small description for the file.
labels: an optional list of label strings with which to tag the file.
config_dir: Path to Subversion configuration directory, 'none', or None.
user_name: Your Google account name.
tries: How many attempts to make.
"""
while tries > 0:
if user_name is None:
# Read username if not specified or loaded from svn config, or on
# subsequent tries.
sys.stdout.write('Please enter your googlecode.com username: ')
sys.stdout.flush()
user_name = sys.stdin.readline().rstrip()
if password is None:
# Read password if not loaded from svn config, or on subsequent tries.
print 'Please enter your googlecode.com password.'
print '** Note that this is NOT your Gmail account password! **'
print 'It is the password you use to access Subversion repositories,'
print 'and can be found here: http://code.google.com/hosting/settings'
password = getpass.getpass()
status, reason, url = upload(file_path, project_name, user_name, password,
summary, labels)
# Returns 403 Forbidden instead of 401 Unauthorized for bad
# credentials as of 2007-07-17.
if status in [httplib.FORBIDDEN, httplib.UNAUTHORIZED]:
# Rest for another try.
user_name = password = None
tries = tries - 1
else:
# We're done.
break
return status, reason, url
def main():
parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
'-p PROJECT [options] FILE')
parser.add_option('-s', '--summary', dest='summary',
help='Short description of the file')
parser.add_option('-p', '--project', dest='project',
help='Google Code project name')
parser.add_option('-u', '--user', dest='user',
help='Your Google Code username')
parser.add_option('-w', '--password', dest='password',
help='Your Google Code password')
parser.add_option('-l', '--labels', dest='labels',
help='An optional list of comma-separated labels to attach '
'to the file')
options, args = parser.parse_args()
if not options.summary:
parser.error('File summary is missing.')
elif not options.project:
parser.error('Project name is missing.')
elif len(args) < 1:
parser.error('File to upload not provided.')
elif len(args) > 1:
parser.error('Only one file may be specified.')
file_path = args[0]
if options.labels:
labels = options.labels.split(',')
else:
labels = None
status, reason, url = upload_find_auth(file_path, options.project,
options.summary, labels,
options.user, options.password)
if url:
print 'The file was uploaded successfully.'
print 'URL: %s' % url
return 0
else:
print 'An error occurred. Your file was not uploaded.'
print 'Google Code upload server said: %s (%s)' % (reason, status)
return 1
if __name__ == '__main__':
sys.exit(main())
|
mit
|
sarvex/tensorflow
|
tensorflow/python/kernel_tests/functional_ops_test.py
|
9
|
47969
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.functional_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import def_function as eager_def_function
from tensorflow.python.eager import function as eager_function
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_functional_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.util import compat
# pylint: disable=invalid-name
def simple_scoped_fn(a, x):
"""Simple function: (a, x) -> 2(x+a), but with "2" as a variable in scope."""
with variable_scope.variable_scope("body"):
# Dummy variable, just to check that scoping works as intended.
two = variable_scope.get_variable(
"two", [],
dtype=dtypes.int32,
initializer=init_ops.constant_initializer(2))
return math_ops.multiply(math_ops.add(a, x), two)
@test_util.with_control_flow_v2
class FunctionalOpsTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testFoldl_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
self.assertAllEqual(208, self.evaluate(r))
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldl(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldl(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldl_MultiInputDifferentDimsSingleOutput(self):
elems = np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]])
other_elems = np.array([-1.0, 1.0])
initializer = np.array([0.0, 0.0, 0.0])
r = functional_ops.foldl(lambda a, x: a + x[0] * x[1],
(elems, other_elems), initializer)
self.assertAllEqual([1.0, 2.0, 3.0], self.evaluate(r))
@test_util.run_deprecated_v1
def testFoldl_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldl(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(208, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldl(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(880, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_Simple(self):
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems)
self.assertAllEqual(450, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(math_ops.add(a, x), 2),
elems,
initializer=10)
self.assertAllEqual(1282, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testFoldr_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array([1, -1.0])
r = functional_ops.foldr(lambda a, x: a + x, elems, initializer)
r_value = self.evaluate(r)
self.assertAllEqual(22, r_value[0])
self.assertAllEqual(20, r_value[1])
@test_util.run_in_graph_and_eager_modes
def testFoldr_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
r = functional_ops.foldr(lambda a, x: a + x[0] + x[1], (elems, -elems),
initializer)
self.assertAllEqual(1, self.evaluate(r))
@test_util.run_deprecated_v1
def testFoldr_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.foldr(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
self.assertAllEqual(450, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.foldr(simple_scoped_fn, elems, initializer=10)
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertAllEqual(1282, self.evaluate(r))
# pylint: disable=unnecessary-lambda
@test_util.run_deprecated_v1
def testFold_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
r = functional_ops.foldl(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
r = functional_ops.foldr(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(720.0, self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Simple(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems)
self.assertAllEqual([1., 2., 6., 24., 120., 720.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
self.assertAllEqual([2., 4., 12., 48., 240., 1440.], self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_Reverse(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(lambda a, x: math_ops.multiply(a, x), elems,
reverse=True)
self.assertAllEqual([720., 720., 360., 120., 30., 6.], self.evaluate(r))
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v,
reverse=True)
self.assertAllEqual([1440., 1440., 720., 240., 60., 12.],
self.evaluate(r))
# pylint: enable=unnecessary-lambda
@test_util.run_in_graph_and_eager_modes
def testScan_SingleInputMultiOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = (np.array(1.0), np.array(-1.0))
r = functional_ops.scan(lambda a, x: (a[0] * x, -a[1] * x), elems,
initializer)
r_value = self.evaluate(r)
self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r_value[0])
self.assertAllEqual([1.0, -2.0, 6.0, -24.0, 120.0, -720.0], r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSingleOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
r = functional_ops.scan(lambda a, x: a * (x[0] + x[1]),
(elems + 1, -elems), initializer)
self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScan_MultiInputSameTypeOutput(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
r = functional_ops.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]),
(elems, -elems))
r_value = self.evaluate(r)
self.assertAllEqual(np.cumsum(elems), r_value[0])
self.assertAllEqual(np.cumsum(-elems), r_value[1])
@test_util.run_in_graph_and_eager_modes
def testScan_MultiOutputMismatchedInitializer(self):
elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
initializer = np.array(1.0)
# Multiply a * 1 each time
with self.assertRaisesRegex(
ValueError, "two structures don't have the same nested structure"):
functional_ops.scan(lambda a, x: (a, -a), elems, initializer)
@test_util.run_deprecated_v1
def testScan_Scoped(self):
with self.cached_session() as sess:
with variable_scope.variable_scope("root") as varscope:
elems = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
r = functional_ops.scan(simple_scoped_fn, elems)
# Check that we have the one variable we asked for here.
self.assertEqual(len(variables.trainable_variables()), 1)
self.assertEqual(variables.trainable_variables()[0].name,
"root/body/two:0")
sess.run([variables.global_variables_initializer()])
results = np.array([1, 6, 18, 44, 98, 208])
self.assertAllEqual(results, self.evaluate(r))
# Now let's reuse our single variable.
varscope.reuse_variables()
r = functional_ops.scan(simple_scoped_fn, elems, initializer=2)
self.assertEqual(len(variables.trainable_variables()), 1)
results = np.array([6, 16, 38, 84, 178, 368])
self.assertAllEqual(results, self.evaluate(r))
@test_util.run_in_graph_and_eager_modes
def testScanFoldl_Nested(self):
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0], name="data")
inner_elems = constant_op.constant([0.5, 0.5], name="data")
def r_inner(a, x):
return functional_ops.foldl(
lambda b, y: b * y * x, inner_elems, initializer=a)
r = functional_ops.scan(r_inner, elems)
# t == 0 (returns 1)
# t == 1, a == 1, x == 2 (returns 1)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1
# t_1 == 1, b == 1, y == 0.5, returns b * y * x = 1
# t == 2, a == 1, x == 3 (returns 1.5*1.5 == 2.25)
# t_0 == 0, b == a == 1, y == 0.5, returns b * y * x = 1.5
# t_1 == 1, b == 1.5, y == 0.5, returns b * y * x = 1.5*1.5
# t == 3, a == 2.25, x == 4 (returns 9)
# t_0 == 0, b == a == 2.25, y == 0.5, returns b * y * x = 4.5
# t_1 == 1, b == 4.5, y == 0.5, returns b * y * x = 9
self.assertAllClose([1., 1., 2.25, 9.], self.evaluate(r))
@test_util.run_deprecated_v1
def testScan_Control(self):
with self.cached_session() as sess:
s = array_ops.placeholder(dtypes.float32, shape=[None])
b = array_ops.placeholder(dtypes.bool)
with ops.control_dependencies([b]):
c = functional_ops.scan(lambda a, x: x * a, s)
self.assertAllClose(
np.array([1.0, 3.0, 9.0]), sess.run(c, {s: [1, 3, 3],
b: True}))
@test_util.run_deprecated_v1
def testScan_Grad(self):
with self.cached_session():
elems = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
v = constant_op.constant(2.0, name="v")
# pylint: disable=unnecessary-lambda
r = functional_ops.scan(
lambda a, x: math_ops.multiply(a, x), elems, initializer=v)
# pylint: enable=unnecessary-lambda
r = gradients_impl.gradients(r, v)[0]
self.assertAllEqual(873.0, self.evaluate(r))
@test_util.run_deprecated_v1
def testScanGradientWithPartStopGradient(self):
a = variables.Variable(0.0, name="a")
b = variables.Variable(0.0, name="b")
elems = array_ops.zeros(5)
l0, l1 = functional_ops.scan(
lambda elem_, input_: (a, b), elems, initializer=(0., 0.))
loss = l0 + array_ops.stop_gradient(l1)
grad = gradients_impl.gradients(ys=[loss], xs=[a, b])
with self.test_session():
self.evaluate(variables.global_variables_initializer())
self.evaluate(grad)
@test_util.run_in_graph_and_eager_modes
def testFoldShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.foldl(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
@test_util.run_in_graph_and_eager_modes
def testScanShape(self):
x = constant_op.constant([[1, 2, 3], [4, 5, 6]])
def fn(_, current_input):
return current_input
initializer = constant_op.constant([0, 0, 0])
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertAllEqual(y.get_shape(), self.evaluate(y).shape)
# TODO(akshayka): this test fails in eager: the iterable is of length 0 so
# so the body of the while loop never executes
@test_util.run_deprecated_v1
def testScanEmptyTensor(self):
with self.cached_session():
x = functional_ops.scan(
lambda x, _: x, math_ops.range(0), initializer=array_ops.ones([2, 4]))
self.assertAllEqual([0, 2, 4], x.get_shape())
self.assertAllEqual(x.get_shape(), self.evaluate(x).shape)
@test_util.run_deprecated_v1
def testScanUnknownShape(self):
x = array_ops.placeholder(dtypes.float32)
initializer = array_ops.placeholder(dtypes.float32)
def fn(_, current_input):
return current_input
y = functional_ops.scan(fn, x, initializer=initializer)
self.assertIs(None, y.get_shape().dims)
@test_util.run_deprecated_v1
def testScanVaryingShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtype=dtypes.float32, shape=[None, 2])
x_t = array_ops.transpose(x)
# scan over dimension 0 (with shape None)
result = functional_ops.scan(lambda a, x: a + x, x)
# scanned over transposed dimension 0 (with shape 2)
result_t = functional_ops.scan(lambda a, x: a + x, x_t, infer_shape=False)
# ensure gradients can be calculated
result_grad = gradients_impl.gradients(result, [x])[0]
result_t_grad = gradients_impl.gradients(result_t, [x_t])[0]
# smoke test to ensure they all evaluate
sess.run([result, result_t, result_grad, result_t_grad],
feed_dict={x: [[1.0, 2.0]]})
@test_util.run_deprecated_v1
def testRemoteFunction(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
1, 1, worker_config=worker_config)
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:worker/replica:0/task:0/cpu:1")
with session.Session(worker[0].target) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionDirectSession(self):
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.int32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:1")
with self.test_session(config=worker_config) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionSameDeviceDirectSession(self):
@function.Defun(dtypes.int32, dtypes.int32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/cpu:0"):
a = variables.Variable(2, dtype=dtypes.int32)
b = variables.Variable(3, dtype=dtypes.int32)
with ops.device("/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b], Tout=[dtypes.int32], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, [6])
@test_util.run_deprecated_v1
def testRemoteFunctionCPUGPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/device:GPU:0")[0] + 3.0
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9.0)
@test_util.run_deprecated_v1
def testRemoteFunctionGPUCPU(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:localhost/replica:0/task:0/device:GPU:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:localhost/replica:0/task:0/cpu:0")[0] + 3.0
with self.cached_session() as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9.0)
@test_util.run_deprecated_v1
def testRemoteFunctionGPUCPUStrings(self):
if not test_util.is_gpu_available():
self.skipTest("No GPU available")
@function.Defun(dtypes.string)
def _remote_fn(inp):
return array_ops.identity(inp)
a = array_ops.constant("a")
with ops.device("/gpu:0"):
remote_op = functional_ops.remote_call(
args=[a], Tout=[dtypes.string], f=_remote_fn, target="/cpu:0")
with self.cached_session() as sess:
ret = self.evaluate(remote_op)
self.assertAllEqual(ret, [b"a"])
@test_util.run_deprecated_v1
def testRemoteFunctionCrossProcess(self):
workers, _ = test_util.create_local_cluster(2, 1)
@function.Defun(dtypes.float32, dtypes.float32)
def _remote_fn(a, b):
return math_ops.multiply(a, b)
with ops.device("/job:ps/task:0"):
a = variables.Variable(2, dtype=dtypes.float32)
b = variables.Variable(3, dtype=dtypes.float32)
with ops.device("/job:worker/replica:0/task:0/cpu:0"):
remote_op = functional_ops.remote_call(
args=[a, b],
Tout=[dtypes.float32],
f=_remote_fn,
target="/job:worker/replica:0/task:1/cpu:0")[0] + 3.0
with session.Session(workers[0].target) as sess:
self.evaluate(variables.global_variables_initializer())
mul = self.evaluate(remote_op)
self.assertEqual(mul, 9)
@test_util.run_deprecated_v1
def testIf(self):
@function.Defun(dtypes.float32)
def Twice(x):
return x * 2
@function.Defun(dtypes.float32)
def Thrice(x):
return x * 3 + 1
with self.test_session(use_gpu=False) as sess:
x = array_ops.placeholder(dtypes.float32)
ret = functional_ops.If(math_ops.greater(x, 0), [x], Twice, Thrice)[0]
self.assertAllEqual(sess.run(ret, feed_dict={x: 9.}), 18.)
self.assertAllEqual(sess.run(ret, feed_dict={x: -8.}), -23.)
self.assertAllEqual(sess.run(ret, feed_dict={x: 0.}), 1.)
def testWhile(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
def Run(sess, n):
return sess.run(functional_ops.While([n, 0.], Cond, Body))[1]
with self.session(graph=g, use_gpu=use_gpu) as sess:
self.assertAllEqual(Run(sess, 20.), 210.)
self.assertAllEqual(Run(sess, 100.), 5050.)
def testToBool(self):
# For 0D tensors, the truthiness depends on whether the value is "zero".
self.assertAllEqual(gen_functional_ops.to_bool(0), False)
self.assertAllEqual(gen_functional_ops.to_bool(1), True)
self.assertAllEqual(gen_functional_ops.to_bool(42), True)
self.assertAllEqual(gen_functional_ops.to_bool(0.), False)
self.assertAllEqual(gen_functional_ops.to_bool(1.), True)
self.assertAllEqual(gen_functional_ops.to_bool(42.), True)
self.assertAllEqual(gen_functional_ops.to_bool(False), False)
self.assertAllEqual(gen_functional_ops.to_bool(True), True)
# For strings, "zero" is the empty string.
self.assertAllEqual(gen_functional_ops.to_bool(""), False)
self.assertAllEqual(gen_functional_ops.to_bool("a"), True)
# For >0D tensors, the truthiness only depends on whether there are
# elements or not.
self.assertAllEqual(gen_functional_ops.to_bool([]), False)
self.assertAllEqual(gen_functional_ops.to_bool([[]]), False)
self.assertAllEqual(gen_functional_ops.to_bool([[[]]]), False)
self.assertAllEqual(gen_functional_ops.to_bool([0]), True)
self.assertAllEqual(gen_functional_ops.to_bool([1]), True)
self.assertAllEqual(gen_functional_ops.to_bool([[0]]), True)
self.assertAllEqual(gen_functional_ops.to_bool([False]), True)
self.assertAllEqual(gen_functional_ops.to_bool([True]), True)
# Like above, but using int32 in order to ensure that int32 tensors don't get
# copied to the GPU during the application of the while.
def testWhileInt32(self):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.int32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.int32] * 2)
def Body(n, x):
return n - 1, x + n
def Run(sess, n):
return sess.run(functional_ops.While([n, 0], Cond, Body))[1]
with self.session(graph=g, use_gpu=True) as sess:
self.assertAllEqual(Run(sess, 20), 210)
self.assertAllEqual(Run(sess, 100), 5050)
@test_util.run_deprecated_v1
def testWhileLowering(self):
def Run(n, fetch_by_name):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
# outputs: [0, n*(n+1)/2]
outputs = functional_ops.While([n, 0.], Cond, Body, name="my_while")
# `outputs` is the list of output tensors of the While op. We
# arbitrarily choose the 0th tensor to get the While op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
if not fetch_by_name:
fetch = outputs[1]
else:
fetch = "my_while:1"
with self.session(graph=g, use_gpu=use_gpu) as sess:
return self.evaluate(fetch)
self.assertAllEqual(Run(20., False), 210.)
self.assertAllEqual(Run(20., True), 210.)
self.assertAllEqual(Run(100., False), 5050.)
self.assertAllEqual(Run(100., True), 5050.)
@test_util.run_v1_only("b/120545219")
@test_util.disable_xla("b/123337890") # Different error message
def testWhileError(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, unused_x):
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def CondReturnsTooManyArgs(n, x):
return n > 0, x
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
@function.Defun(*[dtypes.float32] * 2)
def BodyReturnsTooManyArgs(n, x):
return n - 1, x + n, x
with self.session(graph=g, use_gpu=use_gpu):
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Expected a single scalar.*got 2 tensors."):
functional_ops.While([5., 0.], CondReturnsTooManyArgs,
Body)[0].eval()
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"While loop body returned 3 arguments. Expected: 2"):
functional_ops.While([5., 0.], Cond,
BodyReturnsTooManyArgs)[0].eval()
def testWhileInMultipleSubgraphs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
@function.Defun(*[dtypes.float32] * 2)
def Cond(n, x): # pylint: disable=unused-argument
return n > 0
@function.Defun(*[dtypes.float32] * 2)
def Body(n, x):
return n - 1, x + n
with self.session(graph=g, use_gpu=use_gpu) as sess:
n = array_ops.placeholder(dtypes.float32)
_, result = functional_ops.While([n, 0.], Cond, Body)
c = constant_op.constant(37.)
self.assertAllEqual(210., sess.run(result, feed_dict={n: 20.}))
self.assertAllEqual(5050., sess.run(result, feed_dict={n: 100.}))
# Test that the result is the same when we run a different subgraph.
self.assertAllEqual(5050.,
sess.run([result, c], feed_dict={n: 100.})[0])
# pylint: disable=cell-var-from-loop
def testWhileCapturedInputs(self):
for use_gpu in (True, False):
with ops.Graph().as_default() as g:
v = variables.Variable(1.0)
def TestCond(n, *args):
del args
return n < 10
@function.Defun(*[dtypes.float32] * 2)
def TestUnary(n, x):
return math_ops.add(n, 1), x + n + v
@function.Defun(*[dtypes.float32] * 3)
def TestBinary(n, x, x2):
return math_ops.add(n, 1), x + n + v, x2 + v
with self.session(graph=g, use_gpu=use_gpu) as sess:
result_unary = functional_ops.While(
[1.0, 0.],
function.Defun(*[dtypes.float32] * 2)(TestCond), TestUnary)
result_binary = functional_ops.While(
[1.0, 0., 0.],
function.Defun(*[dtypes.float32] * 3)(TestCond), TestBinary)
self.evaluate(variables.global_variables_initializer())
assert len(result_unary) == 2
self.assertEqual([10.0, 54.0], self.evaluate(result_unary))
assert len(result_binary) == 3
self.assertEqual([10.0, 54.0, 9.0], self.evaluate(result_binary))
def TestCondCapture(n, *args):
del args
return math_ops.cast(n, dtypes.float32) + v < 10
with self.assertRaises(ValueError):
_ = functional_ops.While(
[1],
function.Defun(dtypes.int32)(TestCondCapture),
function.Defun(dtypes.int32, dtypes.float32)(TestUnary))
# pylint: enable=cell-var-from-loop
def _tfSum(self, use_gpu, rewrite_with_while):
with ops.Graph().as_default() as g:
with self.session(graph=g, use_gpu=use_gpu) as sess:
@function.Defun(dtypes.int32, dtypes.float32)
def Body(n, x):
return x + math_ops.cast(n, dtypes.float32)
xs = [
# 1 + 2 + ... + 20
functional_ops.For(
1, 21, 1, [0.], Body, rewrite_with_while=rewrite_with_while)[0],
# 100 + 99 + ... + 1
functional_ops.For(
100, 0, -1, [0.], Body, rewrite_with_while=rewrite_with_while)
[0],
]
xvals = self.evaluate(xs)
self.assertAllEqual(210, xvals[0])
self.assertAllEqual(5050, xvals[1])
def testFor(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, False)
def testForWithWhile(self):
for use_gpu in (True, False):
self._tfSum(use_gpu, True)
def testForWithWhileNaming(self):
g = ops.Graph()
with g.as_default():
@function.Defun(dtypes.int32, dtypes.float32, func_name="TestBody")
def TestBody(n, x):
return x + math_ops.cast(n, dtypes.float32)
_ = functional_ops.For(
1, 21, 1, [0.], TestBody, rewrite_with_while=True)[0]
names = []
for func in g.as_graph_def().library.function:
names.append(func.signature.name)
self.assertTrue("TestBody" in names)
self.assertTrue("TestBody_Cond" in names)
self.assertTrue("TestBody_Body" in names)
@test_util.run_deprecated_v1
def testForCapturedInputs(self):
v = variables.Variable(1.0)
@function.Defun(dtypes.int32)
def TestNullary(n):
v + math_ops.cast(n, dtypes.float32) # pylint: disable=expression-not-assigned
@function.Defun(dtypes.int32, dtypes.float32)
def TestUnary(n, x):
return x + math_ops.cast(n, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32, dtypes.float32)
def TestBinary(n, x, x2):
return x + math_ops.cast(n, dtypes.float32) + v, x2 + v
for rewrite_with_while in (True, False):
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu) as sess:
result_nullary = functional_ops.For(
1, 10, 1, [], TestNullary,
rewrite_with_while=rewrite_with_while)
result_unary = functional_ops.For(
1, 10, 1, [0.], TestUnary,
rewrite_with_while=rewrite_with_while)
result_binary = functional_ops.For(
1, 10, 1, [0., 0.], TestBinary,
rewrite_with_while=rewrite_with_while)
self.evaluate(variables.global_variables_initializer())
assert not result_nullary
# The nullary variant doesn't return anything so we can't easily run it.
# As a total hack, fetch the operation by name and run it.
sess.run(ops.get_default_graph().get_operation_by_name(
"While" if rewrite_with_while else "For"))
assert len(result_unary) == 1
self.assertEqual([54.0], self.evaluate(result_unary))
assert len(result_binary) == 2
self.assertEqual([54.0, 9.0], self.evaluate(result_binary))
def _tfMLP(self, xval, wsval, bsval, rewrite_with_while):
# On GPU, don't rewrite using a while loop.
use_gpu = not rewrite_with_while
with self.test_session(use_gpu=use_gpu):
@function.Defun(dtypes.int32, *[dtypes.float64] * 3)
def MLP(i, a, ws, bs):
a = math_ops.tanh(math_ops.matmul(a, ws[i, :]) + bs[i, :])
return a, ws, bs
ret = functional_ops.For(
0,
wsval.shape[0],
1, [xval, wsval, bsval],
MLP,
rewrite_with_while=rewrite_with_while)[0]
return self.evaluate(ret)
def _npMLP(self, xval, wsval, bsval):
for i in range(wsval.shape[0]):
xval = np.tanh(np.dot(xval, wsval[i, :]) + bsval[i, :])
return xval
def _testForMLP(self, rewrite_with_while):
# We construct a 5-layer Multi-Layer Perceptron network here.
# Each layer have the same number of hidden unites (3), and the
# activation function is tanh(). We feed the input (xval) with
# batch size 2.
xval = np.random.normal(size=(2, 3))
wsval = np.random.normal(size=(5, 3, 3))
bsval = np.random.normal(size=(5, 3))
np_ans = self._npMLP(xval, wsval, bsval)
tf_for_ans = self._tfMLP(xval, wsval, bsval, rewrite_with_while)
self.assertAllClose(np_ans, tf_for_ans)
@test_util.run_deprecated_v1
def testForMLP(self):
self._testForMLP(False)
@test_util.run_deprecated_v1
@test_util.disable_xla(
"Test uses strided slice without compile time constant values")
def testForMLPWhile(self):
self._testForMLP(True)
@test_util.run_v1_only("b/120545219")
def testForError(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(i, v):
return math_ops.cast(i, dtypes.float32) + v
@function.Defun(dtypes.int32, dtypes.float32)
def ReturnsTooManyArgs(unused_i, v):
return v, v
with self.test_session():
with self.assertRaisesRegex(errors.InvalidArgumentError,
"must be a scalar"):
functional_ops.For([0], 10, 1, [0.0], Foo)[0].eval()
with self.assertRaisesRegex(errors.InvalidArgumentError,
"Invalid start/limit/delta"):
functional_ops.For(0, 10, -1, [0.0], Foo)[0].eval()
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"For loop body returned 2 arguments. Expected: 1"):
functional_ops.For(0, 10, 1, [0.0], ReturnsTooManyArgs)[0].eval()
@test_util.run_deprecated_v1
def testGradient(self):
@function.Defun(dtypes.float32)
def Poly(x):
# y = 2x^3+3x^2+4x+8
return 2 * x * x * x + 3 * x * x + 4 * x + 8
@function.Defun(dtypes.float32)
def Grad(x):
# dy/dx = dy/dy * dy/dx = 1.0 * (6x^2+6x+4)
return functional_ops.Gradient([x, 1.0], Poly)[0]
with self.test_session(use_gpu=False) as sess:
a = constant_op.constant(0.)
avals = [Poly(a), Grad(a)]
b = constant_op.constant(1.)
bvals = [Poly(b), Grad(b)]
self.assertAllEqual(self.evaluate(avals), [8., 4.])
self.assertAllEqual(self.evaluate(bvals), [17., 16.])
# TODO(akshayka): Replace `function.Defun` with tf.contrib.eager.defun` in the
# below test cases.
class PartitionedCallTest(test.TestCase):
@test_util.run_deprecated_v1
def testRemoteDeviceInPartitionedCallOp(self):
workers, _ = test_util.create_local_cluster(2, 0)
worker0_device = "/job:worker/replica:0/task:0/cpu:0"
worker1_device = "/job:worker/replica:0/task:1/cpu:0"
@eager_def_function.function
def f(a, b):
return a + b
with session.Session(workers[0].target) as sess:
with ops.device(worker0_device):
a = variable_scope.get_variable(
"a", initializer=constant_op.constant(1.), use_resource=True)
with ops.device(worker1_device):
b = variable_scope.get_variable(
"b", initializer=constant_op.constant(1.), use_resource=True)
sess.run(variables.global_variables_initializer())
config = config_pb2.ConfigProto()
config.share_cluster_devices_in_session = True
with session.Session(workers[0].target, config=config) as sess:
res = sess.run(f(a, b))
self.assertEqual(res, 2)
@test_util.run_deprecated_v1
def testBasicSingleDevice(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/cpu:0"):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
@test_util.run_deprecated_v1
def testBasicMultiDevice(self):
config = config_pb2.ConfigProto(device_count={"CPU": 3})
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
# if x = 1, y = 2, ...
with ops.device("/cpu:0"):
# a:= 1 + 1 = 2
a = x + x
with ops.device("/cpu:1"):
# b:= 2 + 2 = 4
b = a + y
with ops.device("/cpu:2"):
# c:= 2 + 4 = 6
c = a + b
# a + b + c = 2 + 4 + 6 = 12
return a + b + c
with self.test_session(config=config):
output, = functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body)
self.assertEqual(self.evaluate(output), 12.)
@test_util.run_deprecated_v1
def testBasicMultiDeviceGPU(self):
if not test_util.is_gpu_available():
return
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
with ops.device("/gpu:0"):
a = x + x
b = y + y
with ops.device("/cpu:0"):
c = a + b
return c
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
@test_util.run_deprecated_v1
def testBasicNoDeviceAnnotations(self):
@function.Defun(*[dtypes.float32] * 2)
def Body(x, y):
a = x + x
b = y + y
return a + b
output, = self.evaluate(
functional_ops.partitioned_call(
args=[constant_op.constant(1.),
constant_op.constant(2.)], f=Body))
self.assertEqual(output, 6.)
@test_util.run_deprecated_v1
def testShardsRunOnRequestedDevices(self):
config = config_pb2.ConfigProto(device_count={"CPU": 4})
@function.Defun()
def Body():
# Serialize DT_RESOURCE handles as DT_STRINGs, which encode the device on
# which the resource was created, so that we can verify that ops were
# actually run on the requested devices.
#
# TODO(akshayka): Provide a cleaner, more idiomatic API for obtaining the
# name of the device on which a resource lives / for determining the
# device on which an op ran.
with ops.device("/cpu:0"):
s1 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:1"):
s2 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
with ops.device("/cpu:2"):
s3 = iterator_ops.Iterator.from_structure(
(dtypes.float32,)).string_handle()
return s1, s2, s3
with self.test_session(config=config, use_gpu=True) as sess:
outputs = sess.run(functional_ops.partitioned_call(args=[], f=Body))
self.assertIn(compat.as_bytes("CPU:0"), outputs[0])
self.assertIn(compat.as_bytes("CPU:1"), outputs[1])
self.assertIn(compat.as_bytes("CPU:2"), outputs[2])
@test_util.run_deprecated_v1
def testAssignAddResourceVariable(self):
v = resource_variable_ops.ResourceVariable(1.0)
@function.Defun()
def AssignAdd():
v.assign_add(1.0)
op = functional_ops.partitioned_call(
args=AssignAdd.captured_inputs, f=AssignAdd)
_ = self.evaluate(variables.global_variables_initializer())
_ = self.evaluate(op)
value = self.evaluate(v.read_value())
self.assertEqual(value, 2.0)
@test_util.run_deprecated_v1
def testFunctionWithResourcesOnDifferentDevices(self):
if not test_util.is_gpu_available():
self.skipTest("No GPUs available.")
with ops.device("/cpu:0"):
v_cpu_zero = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_zero")
with ops.device("/cpu:1"):
v_cpu_one = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_cpu_one")
with ops.device("/gpu:0"):
v_gpu = resource_variable_ops.ResourceVariable(
[0.0, 1.0, 2.0], name="v_gpu")
def sum_gather():
cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_zero, [1, 2]))
also_cpu_result = math_ops.reduce_sum(array_ops.gather(v_cpu_one, [1, 2]))
gpu_result = math_ops.reduce_sum(array_ops.gather(v_gpu, [1, 2]))
return cpu_result, also_cpu_result, gpu_result
defined = function.Defun()(sum_gather)
with self.test_session(
config=config_pb2.ConfigProto(
allow_soft_placement=False,
log_device_placement=True,
device_count={"CPU": 2})) as sess:
self.evaluate(variables.global_variables_initializer())
expected = self.evaluate(sum_gather())
result = sess.run(
functional_ops.partitioned_call(
args=defined.captured_inputs, f=defined))
self.assertAllEqual(expected, result)
# Use an invalid executor name to test the plumbing of the executor_type attr.
@test_util.run_v1_only("b/120545219")
def testExecutorTypeAttrExecutorNotFound(self):
@function.Defun(dtypes.int32)
def AddFive(x):
return x + 5
op = functional_ops.partitioned_call(
args=[constant_op.constant([1, 2, 3], dtype=dtypes.int32)],
f=AddFive,
executor_type="NON_EXISTENT_EXECUTOR")
with self.assertRaisesRegex(errors.NotFoundError, "NON_EXISTENT_EXECUTOR"):
self.evaluate(op)
@test_util.run_all_in_graph_and_eager_modes
@test_util.with_control_flow_v2
class FunctionalOpsCaseTest(test.TestCase):
def testCase(self):
@eager_function.defun
def two(x):
return x * 2
@eager_function.defun
def three(x):
return x * 3
@eager_function.defun
def four(x):
return x * 4
def f(branch, x):
tmpl = array_ops.zeros_like(x)
return array_ops.identity(gen_functional_ops.case(
branch, input=[x], Tout=[dtypes.float32],
branches=[f.get_concrete_function(tmpl)
for f in (two, three, four)])[0])
one = array_ops.ones([])
self.assertAllEqual(np.float32(2), self.evaluate(f(0, one)))
self.assertAllEqual(np.float32(3), self.evaluate(f(1, one)))
self.assertAllEqual(np.float32(4), self.evaluate(f(2, one)))
self.assertAllEqual(np.float32(4), self.evaluate(f(-1, one))) # <0 default
self.assertAllEqual(np.float32(4), self.evaluate(f(6, one))) # >=N default
@test_util.run_deprecated_v1
@test_util.disable_xla("Don't lower for XLA")
def testSkipEagerCaseLoweringPreservesNameForFetch(self):
for use_gpu in (True, False):
def Run(branch, x, fetch_by_name, use_gpu=use_gpu):
with ops.Graph().as_default() as g:
@function.Defun(dtypes.float32)
def two(x):
return -1, x * 2
@function.Defun(dtypes.float32)
def three(x):
return 0, x * 3
@function.Defun(dtypes.float32)
def four(x):
return 1, x * 4
outputs = gen_functional_ops.case(branch, input=[x],
Tout=[dtypes.int32, dtypes.float32],
branches=[two, three, four],
name="my_case")
# `outputs` is the list of output tensors of the Case op. We
# arbitrarily choose the 0th tensor to get the Case op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
outputs = array_ops.identity_n(outputs)
with self.session(graph=g, use_gpu=use_gpu) as sess:
return sess.run("my_case:1" if fetch_by_name else outputs[1])
self.assertAllEqual(2 * 1., Run(0, 1., False))
self.assertAllEqual(2 * 1., Run(0, 1., True))
self.assertAllEqual(3 * 7., Run(1, 7., False))
self.assertAllEqual(3 * 7., Run(1, 7., True))
self.assertAllEqual(4 * -3., Run(2, -3., False))
self.assertAllEqual(4 * -3., Run(2, -3., True))
self.assertAllEqual(4 * -4., Run(7, -4., False)) # >= N default
self.assertAllEqual(4 * -4., Run(7, -4., True)) # >= N default
self.assertAllEqual(4 * -5., Run(-1, -5., False)) # <0 default
self.assertAllEqual(4 * -5., Run(-1, -5., True)) # <0 default
@test_util.disable_xla("Don't lower for XLA")
def testCaseLowering(self):
for use_gpu in (True, False):
@eager_function.defun
def Run(branch, x):
@function.Defun(dtypes.float32)
def two(x):
return -1, x * 2
@function.Defun(dtypes.float32)
def three(x):
return 0, x * 3
@function.Defun(dtypes.float32)
def four(x):
return 1, x * 4
outputs = gen_functional_ops.case(branch, input=[x],
Tout=[dtypes.int32, dtypes.float32],
branches=[two, three, four])
# `outputs` is the list of output tensors of the Case op. We
# arbitrarily choose the 0th tensor to get the Case op and set the
# lowering attribute on it.
outputs[0].op._set_attr("_lower_using_switch_merge",
attr_value_pb2.AttrValue(b=True))
outputs = array_ops.identity_n(outputs)
return outputs[1]
with ops.device(test.gpu_device_name() if use_gpu else "CPU:0"):
self.assertAllEqual(2 * 1., self.evaluate(Run(0, 1.)))
self.assertAllEqual(3 * 7., self.evaluate(Run(1, 7.)))
self.assertAllEqual(4 * -3., self.evaluate(Run(2, -3.)))
self.assertAllEqual(4 * -4., self.evaluate(Run(7, -4.))) # >=N default
self.assertAllEqual(4 * -5., self.evaluate(Run(-1, -5.))) # <0 default
if __name__ == "__main__":
test.main()
# pylint: enable=invalid-name
|
apache-2.0
|
pgmillon/ansible
|
lib/ansible/modules/remote_management/redfish/idrac_redfish_command.py
|
12
|
5893
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: idrac_redfish_command
version_added: "2.8"
short_description: Manages Out-Of-Band controllers using iDRAC OEM Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
perform an action.
- For use with Dell iDRAC operations that require Redfish OEM extensions
options:
category:
required: true
description:
- Category to execute on OOB controller
type: str
command:
required: true
description:
- List of commands to execute on OOB controller
type: list
baseuri:
required: true
description:
- Base URI of OOB controller
type: str
username:
required: true
description:
- User for authentication with OOB controller
type: str
password:
required: true
description:
- Password for authentication with OOB controller
type: str
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
version_added: '2.8'
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Create BIOS configuration job (schedule BIOS setting update)
idrac_redfish_command:
category: Systems
command: CreateBiosConfigJob
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
msg:
description: Message with action result or error description
returned: always
type: str
sample: "Action was successful"
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils
from ansible.module_utils._text import to_native
class IdracRedfishUtils(RedfishUtils):
def create_bios_config_job(self):
result = {}
key = "Bios"
jobs = "Jobs"
# Search for 'key' entry and extract URI from it
response = self.get_request(self.root_uri + self.systems_uris[0])
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
if key not in data:
return {'ret': False, 'msg': "Key %s not found" % key}
bios_uri = data[key]["@odata.id"]
# Extract proper URI
response = self.get_request(self.root_uri + bios_uri)
if response['ret'] is False:
return response
result['ret'] = True
data = response['data']
set_bios_attr_uri = data["@Redfish.Settings"]["SettingsObject"][
"@odata.id"]
payload = {"TargetSettingsURI": set_bios_attr_uri}
response = self.post_request(
self.root_uri + self.manager_uri + "/" + jobs, payload)
if response['ret'] is False:
return response
response_output = response['resp'].__dict__
job_id = response_output["headers"]["Location"]
job_id = re.search("JID_.+", job_id).group()
# Currently not passing job_id back to user but patch is coming
return {'ret': True, 'msg': "Config job %s created" % job_id}
CATEGORY_COMMANDS_ALL = {
"Systems": ["CreateBiosConfigJob"],
"Accounts": [],
"Manager": []
}
def main():
result = {}
module = AnsibleModule(
argument_spec=dict(
category=dict(required=True),
command=dict(required=True, type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
category = module.params['category']
command_list = module.params['command']
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = IdracRedfishUtils(creds, root_uri, timeout, module)
# Check that Category is valid
if category not in CATEGORY_COMMANDS_ALL:
module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
# Check that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a System resource
result = rf_utils._find_systems_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
for command in command_list:
if command == "CreateBiosConfigJob":
# execute only if we find a Managers resource
result = rf_utils._find_managers_resource()
if result['ret'] is False:
module.fail_json(msg=to_native(result['msg']))
result = rf_utils.create_bios_config_job()
# Return data back or fail with proper message
if result['ret'] is True:
del result['ret']
module.exit_json(changed=True, msg='Action was successful')
else:
module.fail_json(msg=to_native(result['msg']))
if __name__ == '__main__':
main()
|
gpl-3.0
|
coolbho3k/kernel-roth
|
tools/perf/scripts/python/futex-contention.py
|
11261
|
1486
|
# futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
process_names = {}
thread_thislock = {}
thread_blocktime = {}
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, uaddr, op, val, utime, uaddr2, val3):
cmd = op & FUTEX_CMD_MASK
if cmd != FUTEX_WAIT:
return # we don't care about originators of WAKE events
process_names[tid] = comm
thread_thislock[tid] = uaddr
thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
nr, ret):
if thread_blocktime.has_key(tid):
elapsed = nsecs(s, ns) - thread_blocktime[tid]
add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
del thread_blocktime[tid]
del thread_thislock[tid]
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
for (tid, lock) in lock_waits:
min, max, avg, count = lock_waits[tid, lock]
print "%s[%d] lock %x contended %d times, %d avg ns" % \
(process_names[tid], tid, lock, count, avg)
|
gpl-2.0
|
imbasimba/astroquery
|
astroquery/skyview/tests/test_skyview.py
|
2
|
2963
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os.path
import types
import pytest
from astropy import coordinates
from astropy import units as u
from ...utils import commons
from ...utils.testing_tools import MockResponse
from ...skyview import SkyView
objcoords = {'Eta Carinae': coordinates.SkyCoord(ra=161.264775 * u.deg,
dec=-59.6844306 * u.deg,
frame='icrs'), }
@pytest.fixture
def patch_fromname(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
def fromname(self, name):
if isinstance(name, str):
return objcoords[name]
else:
raise coordinates.name_resolve.NameResolveError
mp.setattr(commons.ICRSCoord,
'from_name',
types.MethodType(fromname, commons.ICRSCoord))
class MockResponseSkyView(MockResponse):
def __init__(self):
super(MockResponseSkyView, self).__init__()
def get_content(self):
return self.content
class MockResponseSkyviewForm(MockResponse):
def __init__(self, method, url, cache=False, params=None, **kwargs):
super(MockResponseSkyviewForm, self).__init__(**kwargs)
self.content = self.get_content(method, url)
def get_content(self, method, url):
if 'basicform.pl' in url and method == 'GET':
with open(data_path('query_page.html'), 'r') as f:
return f.read()
elif 'runquery.pl' in url and method == 'GET':
with open(data_path('results.html'), 'r') as f:
return f.read()
else:
raise ValueError("Invalid method/url passed to "
"Mock Skyview request")
def data_path(filename):
data_dir = os.path.join(os.path.dirname(__file__), 'data')
return os.path.join(data_dir, filename)
@pytest.fixture
def patch_get(request):
try:
mp = request.getfixturevalue("monkeypatch")
except AttributeError: # pytest < 3
mp = request.getfuncargvalue("monkeypatch")
mp.setattr(SkyView, '_request', MockResponseSkyviewForm)
return mp
def test_get_image_list_local(patch_get, patch_fromname):
urls = SkyView.get_image_list(position='Eta Carinae',
survey=['Fermi 5', 'HRI', 'DSS'])
assert len(urls) == 3
for url in urls:
assert url.startswith('../../tempspace/fits/')
def test_survey_validation(patch_get):
with pytest.raises(ValueError) as ex:
SkyView.get_image_list(position='doesnt matter',
survey=['not_a_valid_survey'])
assert str(ex.value) == ("Survey is not among the surveys hosted "
"at skyview. See list_surveys or "
"survey_dict for valid surveys.")
|
bsd-3-clause
|
exu/poligon
|
python/python_koans/python3/koans/about_comprehension.py
|
56
|
2234
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutComprehension(Koan):
def test_creating_lists_with_list_comprehensions(self):
feast = ['lambs', 'sloths', 'orangutans', 'breakfast cereals',
'fruit bats']
comprehension = [delicacy.capitalize() for delicacy in feast]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, comprehension[2])
def test_filtering_lists_with_list_comprehensions(self):
feast = ['spam', 'sloths', 'orangutans', 'breakfast cereals',
'fruit bats']
comprehension = [delicacy for delicacy in feast if len(delicacy) > 6]
self.assertEqual(__, len(feast))
self.assertEqual(__, len(comprehension))
def test_unpacking_tuples_in_list_comprehensions(self):
list_of_tuples = [(1, 'lumberjack'), (2, 'inquisition'), (4, 'spam')]
comprehension = [ skit * number for number, skit in list_of_tuples ]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, comprehension[2])
def test_double_list_comprehension(self):
list_of_eggs = ['poached egg', 'fried egg']
list_of_meats = ['lite spam', 'ham spam', 'fried spam']
comprehension = [ '{0} and {1}'.format(egg, meat) for egg in list_of_eggs for meat in list_of_meats]
self.assertEqual(__, comprehension[0])
self.assertEqual(__, len(comprehension))
def test_creating_a_set_with_set_comprehension(self):
comprehension = { x for x in 'aabbbcccc'}
self.assertEqual(__, comprehension) # remember that set members are unique
def test_creating_a_dictionary_with_dictionary_comprehension(self):
dict_of_weapons = {'first': 'fear', 'second': 'surprise',
'third':'ruthless efficiency', 'forth':'fanatical devotion',
'fifth': None}
dict_comprehension = { k.upper(): weapon for k, weapon in dict_of_weapons.items() if weapon}
self.assertEqual(__, 'first' in dict_comprehension)
self.assertEqual(__, 'FIRST' in dict_comprehension)
self.assertEqual(__, len(dict_of_weapons))
self.assertEqual(__, len(dict_comprehension))
|
mit
|
dermoth/gramps
|
gramps/gui/editors/displaytabs/locationmodel.py
|
11
|
1690
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# GTK libraries
#
#-------------------------------------------------------------------------
from gi.repository import Gtk
#-------------------------------------------------------------------------
#
# Gramps classes
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
#
# LocationModel
#
#-------------------------------------------------------------------------
class LocationModel(Gtk.ListStore):
def __init__(self, obj_list, db):
Gtk.ListStore.__init__(self, str, str, str, str, str, str, object)
self.db = db
for obj in obj_list:
self.append(row=[obj.street, obj.locality, obj.city, obj.county,
obj.state, obj.country, obj, ])
|
gpl-2.0
|
warriorframework/warriorframework
|
warrior/WarriorCore/iterative_parallel_kw_driver.py
|
1
|
4275
|
'''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
#!/usr/bin/python
"""This is iterative parallel keyword driver which is used to execute
the keywords of a testcase in parallel where data_type = iterative"""
import traceback
from collections import OrderedDict
import WarriorCore.testcase_steps_execution as testcase_steps_execution
import Framework.Utils as Utils
from Framework.Utils.print_Utils import print_debug, print_error
from WarriorCore.multiprocessing_utils import create_and_start_process_with_queue, \
get_results_from_queue, update_tc_junit_resultfile
from Framework.Utils import testcase_Utils
def execute_iterative_parallel(step_list, data_repository, tc_status, system_list):
"""Takes a list of steps as input and executes them in parallel by
creating separate process of step_driver for each of these steps """
jobs_list = []
output_q = None
for system_name in system_list:
target_module = testcase_steps_execution.main
#args_list = [step_list, data_repository, system_name, True]
args_dict = OrderedDict([("step_list", step_list),
("data_repository", data_repository),
("system_name", system_name),
("kw_parallel", True),
("output_q", output_q),
])
process, jobs_list, output_q = create_and_start_process_with_queue(target_module, args_dict,
jobs_list, output_q)
print_debug("process: {0}".format(process))
for job in jobs_list:
job.join()
result_list = get_results_from_queue(output_q)
system_status_list = []
system_resultfile_list = []
step_impact_list = []
tc_junit_list = []
for result in result_list:
step_status_list = result[0]
kw_resultfile_list = result[1]
system_name = result[2]
step_impact_list = result[3]
tc_junit_list.append(result[4])
system_status = testcase_Utils.compute_status_using_impact(step_status_list,
step_impact_list)
system_resultfile = testcase_Utils.compute_system_resultfile(kw_resultfile_list,
data_repository['wt_resultsdir'],
system_name)
system_status_list.append(system_status)
system_resultfile_list.append(system_resultfile)
tc_status = Utils.testcase_Utils.compute_status_without_impact(system_status_list)
# parallel keywords generate multiple keyword junit result files
# each files log the result for one keyword and not intergrated
# update testcase junit result file with individual keyword result files
data_repository['wt_junit_object'] = update_tc_junit_resultfile(data_repository['wt_junit_object'], tc_junit_list, data_repository['wt_tc_timestamp'])
print_debug("Updating Testcase result file...")
Utils.testcase_Utils.append_result_files(data_repository['wt_resultfile'], system_resultfile_list)
return tc_status
def main(step_list, data_repository, tc_status, system_list):
"""Executes the list of keyword in iterative parallel fashion
Computes and returns the testcase status"""
try:
testcase_status = execute_iterative_parallel(step_list, data_repository,
tc_status, system_list)
except Exception:
testcase_status = False
print_error('unexpected error {0}'.format(traceback.format_exc()))
return testcase_status
|
apache-2.0
|
ales-erjavec/orange
|
Orange/OrangeCanvas/gui/tests/test_toolgrid.py
|
6
|
2920
|
from PyQt4.QtGui import QAction, QToolButton
from .. import test
from ..toolgrid import ToolGrid
class TestToolGrid(test.QAppTestCase):
def test_tool_grid(self):
w = ToolGrid()
w.show()
self.app.processEvents()
def buttonsOrderedVisual():
# Process layout events so the buttons have right positions
self.app.processEvents()
buttons = w.findChildren(QToolButton)
return sorted(buttons, key=lambda b: (b.y(), b.x()))
def buttonsOrderedLogical():
return map(w.buttonForAction, w.actions())
def assertOrdered():
self.assertSequenceEqual(buttonsOrderedLogical(),
buttonsOrderedVisual())
action_a = QAction("A", w)
action_b = QAction("B", w)
action_c = QAction("C", w)
action_d = QAction("D", w)
w.addAction(action_b)
w.insertAction(0, action_a)
self.assertSequenceEqual(w.actions(),
[action_a, action_b])
assertOrdered()
w.addAction(action_d)
w.insertAction(action_d, action_c)
self.assertSequenceEqual(w.actions(),
[action_a, action_b, action_c, action_d])
assertOrdered()
w.removeAction(action_c)
self.assertSequenceEqual(w.actions(),
[action_a, action_b, action_d])
assertOrdered()
w.removeAction(action_a)
self.assertSequenceEqual(w.actions(),
[action_b, action_d])
assertOrdered()
w.insertAction(0, action_a)
self.assertSequenceEqual(w.actions(),
[action_a, action_b, action_d])
assertOrdered()
w.setColumnCount(2)
self.assertSequenceEqual(w.actions(),
[action_a, action_b, action_d])
assertOrdered()
w.insertAction(2, action_c)
self.assertSequenceEqual(w.actions(),
[action_a, action_b, action_c, action_d])
assertOrdered()
w.clear()
# test no 'before' action edge case
w.insertAction(0, action_a)
self.assertIs(action_a, w.actions()[0])
w.insertAction(1, action_b)
self.assertSequenceEqual(w.actions(),
[action_a, action_b])
w.clear()
w.setActions([action_a, action_b, action_c, action_d])
self.assertSequenceEqual(w.actions(),
[action_a, action_b, action_c, action_d])
assertOrdered()
triggered_actions = []
def p(action):
print action.text()
w.actionTriggered.connect(p)
w.actionTriggered.connect(triggered_actions.append)
action_a.trigger()
w.show()
self.app.exec_()
|
gpl-3.0
|
smices/mWorkerService
|
src/lib/baidubce/http/handler.py
|
2
|
2816
|
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the
# License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
"""
This module provides general http handler functions for processing http responses from BCE services.
"""
import httplib
import json
from baidubce import utils
from baidubce.exception import BceClientError
from baidubce.exception import BceServerError
def parse_json(http_response, response):
"""If the body is not empty, convert it to a python object and set as the value of
response.body. http_response is always closed if no error occurs.
:param http_response: the http_response object returned by HTTPConnection.getresponse()
:type http_response: httplib.HTTPResponse
:param response: general response object which will be returned to the caller
:type response: baidubce.BceResponse
:return: always true
:rtype bool
"""
body = http_response.read()
if body:
response.__dict__.update(json.loads(body, object_hook=utils.dict_to_python_object).__dict__)
http_response.close()
return True
def parse_error(http_response, response):
"""If the body is not empty, convert it to a python object and set as the value of
response.body. http_response is always closed if no error occurs.
:param http_response: the http_response object returned by HTTPConnection.getresponse()
:type http_response: httplib.HTTPResponse
:param response: general response object which will be returned to the caller
:type response: baidubce.BceResponse
:return: false if http status code is 2xx, raise an error otherwise
:rtype bool
:raise baidubce.exception.BceClientError: if http status code is NOT 2xx
"""
if http_response.status / 100 == httplib.OK / 100:
return False
if http_response.status / 100 == httplib.CONTINUE / 100:
raise BceClientError('Can not handle 1xx http status code')
bse = None
body = http_response.read()
if body:
d = json.loads(body)
bse = BceServerError(d['message'], code=d['code'], request_id=d['requestId'])
if bse is None:
bse = BceServerError(http_response.reason, request_id=response.metadata.bce_request_id)
bse.status_code = http_response.status
raise bse
|
mit
|
valentinmetraux/hierophis
|
hierophis/maths/statistics/basic.py
|
1
|
2080
|
#!/usr/bin/env python
# -*- coding: utf 8 -*-
"""
Utility functions.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import numpy as np
import scipy.signal
def rms(a):
"""
Calculates the RMS of an array.
:param a: An array.
:returns: The RMS of the array.
"""
return np.sqrt(np.sum(a**2.0)/a.size)
def normalize(a, new_min=0.0, new_max=1.0):
"""
Normalize an array to [0,1] or to
arbitrary new min and max.
:param a: An array.
:param new_min: A float to be the new min, default 0.
:param new_max: A float to be the new max, default 1.
:returns: The normalized array.
"""
n = (a - np.amin(a)) / float(np.amax(a - np.amin(a)))
return n * (new_max - new_min) + new_min
def moving_average(a, method = "convolve", length=9, mode='valid'):
"""
Computes the mean in a moving window.
Methods: naive, fft, convolve
Length: Kernel length
Modes: full, valid, same
"""
if method == "fft":
boxcar = np.ones(length)/length
return scipy.signal.fftconvolve(a, boxcar, mode="valid")
elif method == "convolve":
boxcar = np.ones(length)/length
return np.convolve(a, boxcar, mode="valid")
else:
pad = np.floor(length/2)
if mode == 'full':
pad *= 2
# Make a padded version, paddding with first and last values
r = np.empty(a.shape[0] + 2*pad)
r[:pad] = a[0]
r[pad:-pad] = a
r[-pad:] = a[-1]
# Cumsum with shifting trick
s = np.cumsum(r, dtype=float)
s[length:] = s[length:] - s[:-length]
out = s[length-1:]/length
# Decide what to return
if mode == 'same':
if out.shape[0] != a.shape[0]:
# If size doesn't match, then interpolate.
out = (out[:-1, ...] + out[1:, ...]) / 2
return out
elif mode == 'valid':
return out[pad:-pad]
else: # mode=='full' and we used a double pad
return out
|
apache-2.0
|
kenshay/ImageScripter
|
ProgramData/SystemFiles/Python/Lib/test/sample_doctest.py
|
228
|
1037
|
"""This is a sample module that doesn't really test anything all that
interesting.
It simply has a few tests, some of which succeed and some of which fail.
It's important that the numbers remain constant as another test is
testing the running of these tests.
>>> 2+2
4
"""
def foo():
"""
>>> 2+2
5
>>> 2+2
4
"""
def bar():
"""
>>> 2+2
4
"""
def test_silly_setup():
"""
>>> import test.test_doctest
>>> test.test_doctest.sillySetup
True
"""
def w_blank():
"""
>>> if 1:
... print 'a'
... print
... print 'b'
a
<BLANKLINE>
b
"""
x = 1
def x_is_one():
"""
>>> x
1
"""
def y_is_one():
"""
>>> y
1
"""
__test__ = {'good': """
>>> 42
42
""",
'bad': """
>>> 42
666
""",
}
def test_suite():
import doctest
return doctest.DocTestSuite()
|
gpl-3.0
|
kubaszostak/gdal-dragndrop
|
osgeo/apps/Python27/Lib/encodings/utf_16_be.py
|
103
|
1079
|
""" Python 'utf-16-be' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_16_be_encode
def decode(input, errors='strict'):
return codecs.utf_16_be_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.utf_16_be_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = codecs.utf_16_be_decode
class StreamWriter(codecs.StreamWriter):
encode = codecs.utf_16_be_encode
class StreamReader(codecs.StreamReader):
decode = codecs.utf_16_be_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='utf-16-be',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
mit
|
eXcomm/cjdns
|
node_build/dependencies/libuv/build/gyp/pylib/gyp/generator/ninja.py
|
16
|
89501
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
# Gyp expects the following variables to be expandable by the build
# system to the appropriate locations. Ninja prefers paths to be
# known at gyp time. To resolve this, introduce special
# variables starting with $! and $| (which begin with a $ so gyp knows it
# should be treated specially, but is otherwise an invalid
# ninja/shell variable) that are passed to gyp here but expanded
# before writing out into the target .ninja files; see
# ExpandSpecial.
# $! is used for variables that represent a path and that can only appear at
# the start of a string, while $| is used for variables that can appear
# anywhere in a string.
'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
'PRODUCT_DIR': '$!PRODUCT_DIR',
'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
# Special variables that may be used by gyp 'rule' targets.
# We generate definitions for these variables on the fly when processing a
# rule.
'RULE_INPUT_ROOT': '${root}',
'RULE_INPUT_DIRNAME': '${dirname}',
'RULE_INPUT_PATH': '${source}',
'RULE_INPUT_EXT': '${ext}',
'RULE_INPUT_NAME': '${name}',
}
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
generator_supports_multiple_toolsets = (
os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
if arg.startswith(prefix):
return arg[len(prefix):]
return arg
def QuoteShellArgument(arg, flavor):
"""Quote a string such that it will be interpreted as a single argument
by the shell."""
# Rather than attempting to enumerate the bad shell characters, just
# whitelist common OK ones and quote anything else.
if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
return arg # No quoting necessary.
if flavor == 'win':
return gyp.msvs_emulation.QuoteForRspFile(arg)
return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
def Define(d, flavor):
"""Takes a preprocessor define and returns a -D parameter that's ninja- and
shell-escaped."""
if flavor == 'win':
# cl.exe replaces literal # characters with = in preprocesor definitions for
# some reason. Octal-encode to work around that.
d = d.replace('#', '\\%03o' % ord('#'))
return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
def AddArch(output, arch):
"""Adds an arch string to an output path."""
output, extension = os.path.splitext(output)
return '%s.%s%s' % (output, arch, extension)
class Target:
"""Target represents the paths used within a single gyp target.
Conceptually, building a single target A is a series of steps:
1) actions/rules/copies generates source/resources/etc.
2) compiles generates .o files
3) link generates a binary (library/executable)
4) bundle merges the above in a mac bundle
(Any of these steps can be optional.)
From a build ordering perspective, a dependent target B could just
depend on the last output of this series of steps.
But some dependent commands sometimes need to reach inside the box.
For example, when linking B it needs to get the path to the static
library generated by A.
This object stores those paths. To keep things simple, member
variables only store concrete paths to single files, while methods
compute derived values like "the last output of the target".
"""
def __init__(self, type):
# Gyp type ("static_library", etc.) of this target.
self.type = type
# File representing whether any input dependencies necessary for
# dependent actions have completed.
self.preaction_stamp = None
# File representing whether any input dependencies necessary for
# dependent compiles have completed.
self.precompile_stamp = None
# File representing the completion of actions/rules/copies, if any.
self.actions_stamp = None
# Path to the output of the link step, if any.
self.binary = None
# Path to the file representing the completion of building the bundle,
# if any.
self.bundle = None
# On Windows, incremental linking requires linking against all the .objs
# that compose a .lib (rather than the .lib itself). That list is stored
# here.
self.component_objs = None
# Windows only. The import .lib is the output of a build step, but
# because dependents only link against the lib (not both the lib and the
# dll) we keep track of the import library here.
self.import_lib = None
def Linkable(self):
"""Return true if this is a target that can be linked against."""
return self.type in ('static_library', 'shared_library')
def UsesToc(self, flavor):
"""Return true if the target should produce a restat rule based on a TOC
file."""
# For bundles, the .TOC should be produced for the binary, not for
# FinalOutput(). But the naive approach would put the TOC file into the
# bundle, so don't do this for bundles for now.
if flavor == 'win' or self.bundle:
return False
return self.type in ('shared_library', 'loadable_module')
def PreActionInput(self, flavor):
"""Return the path, if any, that should be used as a dependency of
any dependent action step."""
if self.UsesToc(flavor):
return self.FinalOutput() + '.TOC'
return self.FinalOutput() or self.preaction_stamp
def PreCompileInput(self):
"""Return the path, if any, that should be used as a dependency of
any dependent compile step."""
return self.actions_stamp or self.precompile_stamp
def FinalOutput(self):
"""Return the last output of the target, which depends on all prior
steps."""
return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter:
def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
output_file, toplevel_build, output_file_name, flavor,
toplevel_dir=None):
"""
base_dir: path from source root to directory containing this gyp file,
by gyp semantics, all input paths are relative to this
build_dir: path from source root to build output
toplevel_dir: path to the toplevel directory
"""
self.qualified_target = qualified_target
self.target_outputs = target_outputs
self.base_dir = base_dir
self.build_dir = build_dir
self.ninja = ninja_syntax.Writer(output_file)
self.toplevel_build = toplevel_build
self.output_file_name = output_file_name
self.flavor = flavor
self.abs_build_dir = None
if toplevel_dir is not None:
self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
build_dir))
self.obj_ext = '.obj' if flavor == 'win' else '.o'
if flavor == 'win':
# See docstring of msvs_emulation.GenerateEnvironmentFiles().
self.win_env = {}
for arch in ('x86', 'x64'):
self.win_env[arch] = 'environment.' + arch
# Relative path from build output dir to base dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
self.build_to_base = os.path.join(build_to_top, base_dir)
# Relative path from base dir to build dir.
base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
def WriteSpec(self, spec, config_name, generator_flags):
"""The main entry point for NinjaWriter: write the build rules for a spec.
Returns a Target object, which represents the output paths for this spec.
Returns None if there are no outputs (e.g. a settings-only 'none' type
target)."""
self.config_name = config_name
self.name = spec['target_name']
self.toolset = spec['toolset']
config = spec['configurations'][config_name]
self.target = Target(spec['type'])
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
# Track if this target contains any C++ files, to decide if gcc or g++
# should be used for linking.
self.uses_cpp = False
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
self.xcode_settings = self.msvs_settings = None
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
if self.flavor == 'win':
self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
generator_flags)
arch = self.msvs_settings.GetArch(config_name)
self.ninja.variable('arch', self.win_env[arch])
self.ninja.variable('cc', '$cl_' + arch)
self.ninja.variable('cxx', '$cl_' + arch)
if self.flavor == 'mac':
self.archs = self.xcode_settings.GetActiveArchs(config_name)
if len(self.archs) > 1:
self.arch_subninjas = dict(
(arch, ninja_syntax.Writer(
OpenOutput(os.path.join(self.toplevel_build,
self._SubninjaNameForArch(arch)),
'w')))
for arch in self.archs)
# Compute predepends for all rules.
# actions_depends is the dependencies this target depends on before running
# any of its action/rule/copy steps.
# compile_depends is the dependencies this target depends on before running
# any of its compile steps.
actions_depends = []
compile_depends = []
# TODO(evan): it is rather confusing which things are lists and which
# are strings. Fix these.
if 'dependencies' in spec:
for dep in spec['dependencies']:
if dep in self.target_outputs:
target = self.target_outputs[dep]
actions_depends.append(target.PreActionInput(self.flavor))
compile_depends.append(target.PreCompileInput())
actions_depends = filter(None, actions_depends)
compile_depends = filter(None, compile_depends)
actions_depends = self.WriteCollapsedDependencies('actions_depends',
actions_depends)
compile_depends = self.WriteCollapsedDependencies('compile_depends',
compile_depends)
self.target.preaction_stamp = actions_depends
self.target.precompile_stamp = compile_depends
# Write out actions, rules, and copies. These must happen before we
# compile any sources, so compute a list of predependencies for sources
# while we do it.
extra_sources = []
mac_bundle_depends = []
self.target.actions_stamp = self.WriteActionsRulesCopies(
spec, extra_sources, actions_depends, mac_bundle_depends)
# If we have actions/rules/copies, we depend directly on those, but
# otherwise we depend on dependent target's actions/rules/copies etc.
# We never need to explicitly depend on previous target's link steps,
# because no compile ever depends on them.
compile_depends_stamp = (self.target.actions_stamp or compile_depends)
# Write out the compilation steps, if any.
link_deps = []
sources = extra_sources + spec.get('sources', [])
if sources:
if self.flavor == 'mac' and len(self.archs) > 1:
# Write subninja file containing compile and link commands scoped to
# a single arch if a fat binary is being built.
for arch in self.archs:
self.ninja.subninja(self._SubninjaNameForArch(arch))
pch = None
if self.flavor == 'win':
gyp.msvs_emulation.VerifyMissingSources(
sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
pch = gyp.msvs_emulation.PrecompiledHeader(
self.msvs_settings, config_name, self.GypPathToNinja,
self.GypPathToUniqueOutput, self.obj_ext)
else:
pch = gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, self.GypPathToNinja,
lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
link_deps = self.WriteSources(
self.ninja, config_name, config, sources, compile_depends_stamp, pch,
spec)
# Some actions/rules output 'sources' that are already object files.
obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
if obj_outputs:
if self.flavor != 'mac' or len(self.archs) == 1:
link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
else:
print "Warning: Actions/rules writing object files don't work with " \
"multiarch targets, dropping. (target %s)" % spec['target_name']
elif self.flavor == 'mac' and len(self.archs) > 1:
link_deps = collections.defaultdict(list)
if self.flavor == 'win' and self.target.type == 'static_library':
self.target.component_objs = link_deps
# Write out a link step, if needed.
output = None
is_empty_bundle = not link_deps and not mac_bundle_depends
if link_deps or self.target.actions_stamp or actions_depends:
output = self.WriteTarget(spec, config_name, config, link_deps,
self.target.actions_stamp or actions_depends)
if self.is_mac_bundle:
mac_bundle_depends.append(output)
# Bundle all of the above together, if needed.
if self.is_mac_bundle:
output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
if not output:
return None
assert self.target.FinalOutput(), output
return self.target
def _WinIdlRule(self, source, prebuild, outputs):
"""Handle the implicit VS .idl rule for one source file. Fills |outputs|
with files that are generated."""
outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
source, self.config_name)
outdir = self.GypPathToNinja(outdir)
def fix_path(path, rel=None):
path = os.path.join(outdir, path)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
path = self.ExpandRuleVariables(
path, root, dirname, source, ext, basename)
if rel:
path = os.path.relpath(path, rel)
return path
vars = [(name, fix_path(value, outdir)) for name, value in vars]
output = [fix_path(p) for p in output]
vars.append(('outdir', outdir))
vars.append(('idlflags', flags))
input = self.GypPathToNinja(source)
self.ninja.build(output, 'idl', input,
variables=vars, order_only=prebuild)
outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
"""Writes rules to match MSVS's implicit idl handling."""
assert self.flavor == 'win'
if self.msvs_settings.HasExplicitIdlRules(spec):
return []
outputs = []
for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
self._WinIdlRule(source, prebuild, outputs)
return outputs
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
mac_bundle_depends):
"""Write out the Actions, Rules, and Copies steps. Return a path
representing the outputs of these steps."""
outputs = []
if self.is_mac_bundle:
mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
else:
mac_bundle_resources = []
extra_mac_bundle_resources = []
if 'actions' in spec:
outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
extra_mac_bundle_resources)
if 'rules' in spec:
outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
mac_bundle_resources,
extra_mac_bundle_resources)
if 'copies' in spec:
outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
if 'sources' in spec and self.flavor == 'win':
outputs += self.WriteWinIdlFiles(spec, prebuild)
stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
if self.is_mac_bundle:
self.WriteMacBundleResources(
extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
self.WriteMacInfoPlist(mac_bundle_depends)
return stamp
def GenerateDescription(self, verb, message, fallback):
"""Generate and return a description of a build step.
|verb| is the short summary, e.g. ACTION or RULE.
|message| is a hand-written description, or None if not available.
|fallback| is the gyp-level name of the step, usable as a fallback.
"""
if self.toolset != 'target':
verb += '(%s)' % self.toolset
if message:
return '%s %s' % (verb, self.ExpandSpecial(message))
else:
return '%s %s: %s' % (verb, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
extra_mac_bundle_resources):
# Actions cd into the base directory.
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv(
'$!PRODUCT_DIR', config=self.config_name)
all_outputs = []
for action in actions:
# First write out a rule for the action.
name = '%s_%s' % (action['action_name'],
hashlib.md5(self.qualified_target).hexdigest())
description = self.GenerateDescription('ACTION',
action.get('message', None),
name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
if self.flavor == 'win' else False)
args = action['action']
rule_name, _ = self.WriteNewNinjaRule(name, args, description,
is_cygwin, env=env)
inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
if int(action.get('process_outputs_as_sources', False)):
extra_sources += action['outputs']
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += action['outputs']
outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
# Then write out an edge using the rule.
self.ninja.build(outputs, rule_name, inputs,
order_only=prebuild)
all_outputs += outputs
self.ninja.newline()
return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
mac_bundle_resources, extra_mac_bundle_resources):
env = self.GetSortedXcodeEnv()
all_outputs = []
for rule in rules:
# First write out a rule for the rule action.
name = '%s_%s' % (rule['rule_name'],
hashlib.md5(self.qualified_target).hexdigest())
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
args = rule['action']
description = self.GenerateDescription(
'RULE',
rule.get('message', None),
('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
if self.flavor == 'win' else False)
rule_name, args = self.WriteNewNinjaRule(
name, args, description, is_cygwin, env=env)
# TODO: if the command references the outputs directly, we should
# simplify it to just use $out.
# Rules can potentially make use of some special variables which
# must vary per source file.
# Compute the list of variables we'll need to provide.
special_locals = ('source', 'root', 'dirname', 'ext', 'name')
needed_variables = set(['source'])
for argument in args:
for var in special_locals:
if ('${%s}' % var) in argument:
needed_variables.add(var)
def cygwin_munge(path):
if is_cygwin:
return path.replace('\\', '/')
return path
# For each source file, write an edge that generates all the outputs.
for source in rule.get('rule_sources', []):
source = os.path.normpath(source)
dirname, basename = os.path.split(source)
root, ext = os.path.splitext(basename)
# Gather the list of inputs and outputs, expanding $vars if possible.
outputs = [self.ExpandRuleVariables(o, root, dirname,
source, ext, basename)
for o in rule['outputs']]
inputs = [self.ExpandRuleVariables(i, root, dirname,
source, ext, basename)
for i in rule.get('inputs', [])]
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
was_mac_bundle_resource = source in mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
# items in a set and remove them all in a single pass if this becomes
# a performance issue.
if was_mac_bundle_resource:
mac_bundle_resources.remove(source)
extra_bindings = []
for var in needed_variables:
if var == 'root':
extra_bindings.append(('root', cygwin_munge(root)))
elif var == 'dirname':
# '$dirname' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
elif var == 'source':
# '$source' is a parameter to the rule action, which means
# it shouldn't be converted to a Ninja path. But we don't
# want $!PRODUCT_DIR in there either.
source_expanded = self.ExpandSpecial(source, self.base_to_build)
extra_bindings.append(('source', cygwin_munge(source_expanded)))
elif var == 'ext':
extra_bindings.append(('ext', ext))
elif var == 'name':
extra_bindings.append(('name', cygwin_munge(basename)))
else:
assert var == None, repr(var)
inputs = [self.GypPathToNinja(i, env) for i in inputs]
outputs = [self.GypPathToNinja(o, env) for o in outputs]
extra_bindings.append(('unique_name',
hashlib.md5(outputs[0]).hexdigest()))
self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
implicit=inputs,
order_only=prebuild,
variables=extra_bindings)
all_outputs.extend(outputs)
return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
outputs = []
env = self.GetSortedXcodeEnv()
for copy in copies:
for path in copy['files']:
# Normalize the path so trailing slashes don't confuse us.
path = os.path.normpath(path)
basename = os.path.split(path)[1]
src = self.GypPathToNinja(path, env)
dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
env)
outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
if self.is_mac_bundle:
# gyp has mac_bundle_resources to copy things into a bundle's
# Resources folder, but there's no built-in way to copy files to other
# places in the bundle. Hence, some targets use copies for this. Check
# if this file is copied into the current bundle, and if so add it to
# the bundle depends so that dependent targets get rebuilt if the copy
# input changes.
if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
mac_bundle_depends.append(dst)
return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
"""Writes ninja edges for 'mac_bundle_resources'."""
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, map(self.GypPathToNinja, resources)):
output = self.ExpandSpecial(output)
self.ninja.build(output, 'mac_tool', res,
variables=[('mactool_cmd', 'copy-bundle-resource')])
bundle_depends.append(output)
def WriteMacInfoPlist(self, bundle_depends):
"""Write build rules for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'],
self.xcode_settings, self.GypPathToNinja)
if not info_plist:
return
out = self.ExpandSpecial(out)
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = self.GypPathToUniqueOutput(
os.path.basename(info_plist))
defines = ' '.join([Define(d, self.flavor) for d in defines])
info_plist = self.ninja.build(
intermediate_plist, 'preprocess_infoplist', info_plist,
variables=[('defines',defines)])
env = self.GetSortedXcodeEnv(additional_settings=extra_env)
env = self.ComputeExportEnvString(env)
keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
keys = QuoteShellArgument(json.dumps(keys), self.flavor)
self.ninja.build(out, 'copy_infoplist', info_plist,
variables=[('env', env), ('keys', keys)])
bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
precompiled_header, spec):
"""Write build rules to compile all of |sources|."""
if self.toolset == 'host':
self.ninja.variable('ar', '$ar_host')
self.ninja.variable('cc', '$cc_host')
self.ninja.variable('cxx', '$cxx_host')
self.ninja.variable('ld', '$ld_host')
self.ninja.variable('ldxx', '$ldxx_host')
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteSourcesForArch(
self.ninja, config_name, config, sources, predepends,
precompiled_header, spec)
else:
return dict((arch, self.WriteSourcesForArch(
self.arch_subninjas[arch], config_name, config, sources, predepends,
precompiled_header, spec, arch=arch))
for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
predepends, precompiled_header, spec, arch=None):
"""Write build rules to compile all of |sources|."""
extra_defines = []
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
cflags_c = self.xcode_settings.GetCflagsC(config_name)
cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
cflags_objc = ['$cflags_c'] + \
self.xcode_settings.GetCflagsObjC(config_name)
cflags_objcc = ['$cflags_cc'] + \
self.xcode_settings.GetCflagsObjCC(config_name)
elif self.flavor == 'win':
cflags = self.msvs_settings.GetCflags(config_name)
cflags_c = self.msvs_settings.GetCflagsC(config_name)
cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
extra_defines = self.msvs_settings.GetComputedDefines(config_name)
# See comment at cc_command for why there's two .pdb files.
pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
config_name, self.ExpandSpecial)
if not pdbpath_c:
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
pdbpath_c = pdbpath + '.c.pdb'
pdbpath_cc = pdbpath + '.cc.pdb'
self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
else:
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cc = config.get('cflags_cc', [])
# Respect environment variables related to build, but target-specific
# flags can still override them.
if self.toolset == 'target':
cflags_c = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CFLAGS', '').split() + cflags_c)
cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
os.environ.get('CXXFLAGS', '').split() + cflags_cc)
defines = config.get('defines', []) + extra_defines
self.WriteVariableList(ninja_file, 'defines',
[Define(d, self.flavor) for d in defines])
if self.flavor == 'win':
self.WriteVariableList(ninja_file, 'rcflags',
[QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
for f in self.msvs_settings.GetRcflags(config_name,
self.GypPathToNinja)])
include_dirs = config.get('include_dirs', [])
env = self.GetSortedXcodeEnv()
if self.flavor == 'win':
env = self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
config=config_name)
include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
config_name)
self.WriteVariableList(ninja_file, 'includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in include_dirs])
pch_commands = precompiled_header.GetPchBuildCommands(arch)
if self.flavor == 'mac':
# Most targets use no precompiled headers, so only write these if needed.
for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
include = precompiled_header.GetInclude(ext, arch)
if include: ninja_file.variable(var, include)
self.WriteVariableList(ninja_file, 'cflags',
map(self.ExpandSpecial, cflags))
self.WriteVariableList(ninja_file, 'cflags_c',
map(self.ExpandSpecial, cflags_c))
self.WriteVariableList(ninja_file, 'cflags_cc',
map(self.ExpandSpecial, cflags_cc))
if self.flavor == 'mac':
self.WriteVariableList(ninja_file, 'cflags_objc',
map(self.ExpandSpecial, cflags_objc))
self.WriteVariableList(ninja_file, 'cflags_objcc',
map(self.ExpandSpecial, cflags_objcc))
ninja_file.newline()
outputs = []
has_rc_source = False
for source in sources:
filename, ext = os.path.splitext(source)
ext = ext[1:]
obj_ext = self.obj_ext
if ext in ('cc', 'cpp', 'cxx'):
command = 'cxx'
self.uses_cpp = True
elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
command = 'cc'
elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
command = 'cc_s'
elif (self.flavor == 'win' and ext == 'asm' and
self.msvs_settings.GetArch(config_name) == 'x86' and
not self.msvs_settings.HasExplicitAsmRules(spec)):
# Asm files only get auto assembled for x86 (not x64).
command = 'asm'
# Add the _asm suffix as msvs is capable of handling .cc and
# .asm files of the same name without collision.
obj_ext = '_asm.obj'
elif self.flavor == 'mac' and ext == 'm':
command = 'objc'
elif self.flavor == 'mac' and ext == 'mm':
command = 'objcxx'
self.uses_cpp = True
elif self.flavor == 'win' and ext == 'rc':
command = 'rc'
obj_ext = '.res'
has_rc_source = True
else:
# Ignore unhandled extensions.
continue
input = self.GypPathToNinja(source)
output = self.GypPathToUniqueOutput(filename + obj_ext)
if arch is not None:
output = AddArch(output, arch)
implicit = precompiled_header.GetObjDependencies([input], [output], arch)
variables = []
if self.flavor == 'win':
variables, output, implicit = precompiled_header.GetFlagsModifications(
input, output, implicit, command, cflags_c, cflags_cc,
self.ExpandSpecial)
ninja_file.build(output, command, input,
implicit=[gch for _, _, gch in implicit],
order_only=predepends, variables=variables)
outputs.append(output)
if has_rc_source:
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
self.WriteVariableList(ninja_file, 'resource_includes',
[QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
for i in resource_include_dirs])
self.WritePchTargets(ninja_file, pch_commands)
ninja_file.newline()
return outputs
def WritePchTargets(self, ninja_file, pch_commands):
"""Writes ninja rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
var_name = {
'c': 'cflags_pch_c',
'cc': 'cflags_pch_cc',
'm': 'cflags_pch_objc',
'mm': 'cflags_pch_objcc',
}[lang]
map = { 'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx', }
cmd = map.get(lang)
ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
"""Write out a link step. Fills out target.binary. """
if self.flavor != 'mac' or len(self.archs) == 1:
return self.WriteLinkForArch(
self.ninja, spec, config_name, config, link_deps)
else:
output = self.ComputeOutput(spec)
inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
config_name, config, link_deps[arch],
arch=arch)
for arch in self.archs]
extra_bindings = []
if not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
self.ninja.build(output, 'lipo', inputs, variables=extra_bindings)
return output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
link_deps, arch=None):
"""Write out a link step. Fills out target.binary. """
command = {
'executable': 'link',
'loadable_module': 'solink_module',
'shared_library': 'solink',
}[spec['type']]
command_suffix = ''
implicit_deps = set()
solibs = set()
if 'dependencies' in spec:
# Two kinds of dependencies:
# - Linkable dependencies (like a .a or a .so): add them to the link line.
# - Non-linkable dependencies (like a rule that generates a file
# and writes a stamp file): add them to implicit_deps
extra_link_deps = set()
for dep in spec['dependencies']:
target = self.target_outputs.get(dep)
if not target:
continue
linkable = target.Linkable()
if linkable:
new_deps = []
if (self.flavor == 'win' and
target.component_objs and
self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
new_deps = target.component_objs
elif self.flavor == 'win' and target.import_lib:
new_deps = [target.import_lib]
elif target.UsesToc(self.flavor):
solibs.add(target.binary)
implicit_deps.add(target.binary + '.TOC')
else:
new_deps = [target.binary]
for new_dep in new_deps:
if new_dep not in extra_link_deps:
extra_link_deps.add(new_dep)
link_deps.append(new_dep)
final_output = target.FinalOutput()
if not linkable or final_output != target.binary:
implicit_deps.add(final_output)
extra_bindings = []
if self.uses_cpp and self.flavor != 'win':
extra_bindings.append(('ld', '$ldxx'))
output = self.ComputeOutput(spec, arch)
if arch is None and not self.is_mac_bundle:
self.AppendPostbuildVariable(extra_bindings, spec, output, output)
is_executable = spec['type'] == 'executable'
# The ldflags config key is not used on mac or win. On those platforms
# linker flags are set via xcode_settings and msvs_settings, respectively.
env_ldflags = os.environ.get('LDFLAGS', '').split()
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(config_name,
self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
self.GypPathToNinja, arch)
ldflags = env_ldflags + ldflags
elif self.flavor == 'win':
manifest_base_name = self.GypPathToUniqueOutput(
self.ComputeOutputFileName(spec))
ldflags, intermediate_manifest, manifest_files = \
self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
self.ExpandSpecial, manifest_base_name,
output, is_executable,
self.toplevel_build)
ldflags = env_ldflags + ldflags
self.WriteVariableList(ninja_file, 'manifests', manifest_files)
implicit_deps = implicit_deps.union(manifest_files)
if intermediate_manifest:
self.WriteVariableList(
ninja_file, 'intermediatemanifest', [intermediate_manifest])
command_suffix = _GetWinLinkRuleNameSuffix(
self.msvs_settings.IsEmbedManifest(config_name))
def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
if def_file:
implicit_deps.add(def_file)
else:
# Respect environment variables related to build, but target-specific
# flags can still override them.
ldflags = env_ldflags + config.get('ldflags', [])
if is_executable and len(solibs):
rpath = 'lib/'
if self.toolset != 'target':
rpath += self.toolset
ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
ldflags.append('-Wl,-rpath-link=%s' % rpath)
self.WriteVariableList(ninja_file, 'ldflags',
gyp.common.uniquer(map(self.ExpandSpecial, ldflags)))
library_dirs = config.get('library_dirs', [])
if self.flavor == 'win':
library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
for l in library_dirs]
library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
else:
library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
self.flavor)
for l in library_dirs]
libraries = gyp.common.uniquer(map(self.ExpandSpecial,
spec.get('libraries', [])))
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
elif self.flavor == 'win':
libraries = self.msvs_settings.AdjustLibraries(libraries)
self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
linked_binary = output
if command in ('solink', 'solink_module'):
extra_bindings.append(('soname', os.path.split(output)[1]))
extra_bindings.append(('lib',
gyp.common.EncodePOSIXShellArgument(output)))
if self.flavor == 'win':
extra_bindings.append(('binary', output))
if '/NOENTRY' not in ldflags:
self.target.import_lib = output + '.lib'
extra_bindings.append(('implibflag',
'/IMPLIB:%s' % self.target.import_lib))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
output = [output, self.target.import_lib]
if pdbname:
output.append(pdbname)
elif not self.is_mac_bundle:
output = [output, output + '.TOC']
else:
command = command + '_notoc'
elif self.flavor == 'win':
extra_bindings.append(('binary', output))
pdbname = self.msvs_settings.GetPDBName(
config_name, self.ExpandSpecial, output + '.pdb')
if pdbname:
output = [output, pdbname]
if len(solibs):
extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))
ninja_file.build(output, command + command_suffix, link_deps,
implicit=list(implicit_deps),
variables=extra_bindings)
return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
extra_link_deps = any(self.target_outputs.get(dep).Linkable()
for dep in spec.get('dependencies', [])
if dep in self.target_outputs)
if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
# TODO(evan): don't call this function for 'none' target types, as
# it doesn't do anything, and we fake out a 'binary' with a stamp file.
self.target.binary = compile_deps
self.target.type = 'none'
elif spec['type'] == 'static_library':
self.target.binary = self.ComputeOutput(spec)
if (self.flavor not in ('mac', 'openbsd', 'win') and not
self.is_standalone_static_library):
self.ninja.build(self.target.binary, 'alink_thin', link_deps,
order_only=compile_deps)
else:
variables = []
if self.xcode_settings:
libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
if libtool_flags:
variables.append(('libtool_flags', libtool_flags))
if self.msvs_settings:
libflags = self.msvs_settings.GetLibFlags(config_name,
self.GypPathToNinja)
variables.append(('libflags', libflags))
if self.flavor != 'mac' or len(self.archs) == 1:
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', link_deps,
order_only=compile_deps, variables=variables)
else:
inputs = []
for arch in self.archs:
output = self.ComputeOutput(spec, arch)
self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
order_only=compile_deps,
variables=variables)
inputs.append(output)
# TODO: It's not clear if libtool_flags should be passed to the alink
# call that combines single-arch .a files into a fat .a file.
self.AppendPostbuildVariable(variables, spec,
self.target.binary, self.target.binary)
self.ninja.build(self.target.binary, 'alink', inputs,
# FIXME: test proving order_only=compile_deps isn't
# needed.
variables=variables)
else:
self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
assert self.is_mac_bundle
package_framework = spec['type'] in ('shared_library', 'loadable_module')
output = self.ComputeMacBundleOutput()
if is_empty:
output += '.stamp'
variables = []
self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
is_command_start=not package_framework)
if package_framework and not is_empty:
variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
self.ninja.build(output, 'package_framework', mac_bundle_depends,
variables=variables)
else:
self.ninja.build(output, 'stamp', mac_bundle_depends,
variables=variables)
self.target.bundle = output
return output
def GetSortedXcodeEnv(self, additional_settings=None):
"""Returns the variables Xcode would set for build steps."""
assert self.abs_build_dir
abs_build_dir = self.abs_build_dir
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, abs_build_dir,
os.path.join(abs_build_dir, self.build_to_base), self.config_name,
additional_settings)
def GetSortedXcodePostbuildEnv(self):
"""Returns the variables Xcode would set for postbuild steps."""
postbuild_settings = {}
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE')
if strip_save_file:
postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
is_command_start=False):
"""Adds a 'postbuild' variable if there is a postbuild for |output|."""
postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
if postbuild:
variables.append(('postbuilds', postbuild))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
"""Returns a shell command that runs all the postbuilds, and removes
|output| if any of them fails. If |is_command_start| is False, then the
returned string will start with ' && '."""
if not self.xcode_settings or spec['type'] == 'none' or not output:
return ''
output = QuoteShellArgument(output, self.flavor)
postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
if output_binary is not None:
postbuilds = self.xcode_settings.AddImplicitPostbuilds(
self.config_name,
os.path.normpath(os.path.join(self.base_to_build, output)),
QuoteShellArgument(
os.path.normpath(os.path.join(self.base_to_build, output_binary)),
self.flavor),
postbuilds, quiet=True)
if not postbuilds:
return ''
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
['cd', self.build_to_base]))
env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
# G will be non-null if any postbuild fails. Run all postbuilds in a
# subshell.
commands = env + ' (' + \
' && '.join([ninja_syntax.escape(command) for command in postbuilds])
command_string = (commands + '); G=$$?; '
# Remove the final output if any postbuild failed.
'((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
if is_command_start:
return '(' + command_string + ' && '
else:
return '$ && (' + command_string
def ComputeExportEnvString(self, env):
"""Given an environment, returns a string looking like
'export FOO=foo; export BAR="${FOO} bar;'
that exports |env| to the shell."""
export_str = []
for k, v in env:
export_str.append('export %s=%s;' %
(k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
return ' '.join(export_str)
def ComputeMacBundleOutput(self):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return self.ExpandSpecial(
os.path.join(path, self.xcode_settings.GetWrapperName()))
def ComputeOutputFileName(self, spec, type=None):
"""Compute the filename of the final output for the current target."""
if not type:
type = spec['type']
default_variables = copy.copy(generator_default_variables)
CalculateVariables(default_variables, {'flavor': self.flavor})
# Compute filename prefix: the product prefix, or a default for
# the product type.
DEFAULT_PREFIX = {
'loadable_module': default_variables['SHARED_LIB_PREFIX'],
'shared_library': default_variables['SHARED_LIB_PREFIX'],
'static_library': default_variables['STATIC_LIB_PREFIX'],
'executable': default_variables['EXECUTABLE_PREFIX'],
}
prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
# Compute filename extension: the product extension, or a default
# for the product type.
DEFAULT_EXTENSION = {
'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
'shared_library': default_variables['SHARED_LIB_SUFFIX'],
'static_library': default_variables['STATIC_LIB_SUFFIX'],
'executable': default_variables['EXECUTABLE_SUFFIX'],
}
extension = spec.get('product_extension')
if extension:
extension = '.' + extension
else:
extension = DEFAULT_EXTENSION.get(type, '')
if 'product_name' in spec:
# If we were given an explicit name, use that.
target = spec['product_name']
else:
# Otherwise, derive a name from the target name.
target = spec['target_name']
if prefix == 'lib':
# Snip out an extra 'lib' from libs if appropriate.
target = StripPrefix(target, 'lib')
if type in ('static_library', 'loadable_module', 'shared_library',
'executable'):
return '%s%s%s' % (prefix, target, extension)
elif type == 'none':
return '%s.stamp' % target
else:
raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
"""Compute the path for the final output of the spec."""
type = spec['type']
if self.flavor == 'win':
override = self.msvs_settings.GetOutputName(self.config_name,
self.ExpandSpecial)
if override:
return override
if arch is None and self.flavor == 'mac' and type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
filename = self.xcode_settings.GetExecutablePath()
else:
filename = self.ComputeOutputFileName(spec, type)
if arch is None and 'product_dir' in spec:
path = os.path.join(spec['product_dir'], filename)
return self.ExpandSpecial(path)
# Some products go into the output root, libraries go into shared library
# dir, and everything else goes into the normal place.
type_in_output_root = ['executable', 'loadable_module']
if self.flavor == 'mac' and self.toolset == 'target':
type_in_output_root += ['shared_library', 'static_library']
elif self.flavor == 'win' and self.toolset == 'target':
type_in_output_root += ['shared_library']
if arch is not None:
# Make sure partial executables don't end up in a bundle or the regular
# output directory.
archdir = 'arch'
if self.toolset != 'target':
archdir = os.path.join('arch', '%s' % self.toolset)
return os.path.join(archdir, AddArch(filename, arch))
elif type in type_in_output_root or self.is_standalone_static_library:
return filename
elif type == 'shared_library':
libdir = 'lib'
if self.toolset != 'target':
libdir = os.path.join('lib', '%s' % self.toolset)
return os.path.join(libdir, filename)
else:
return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
assert not isinstance(values, str)
if values is None:
values = []
ninja_file.variable(var, ' '.join(values))
def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
"""Write out a new ninja "rule" statement for a given command.
Returns the name of the new rule, and a copy of |args| with variables
expanded."""
if self.flavor == 'win':
args = [self.msvs_settings.ConvertVSMacros(
arg, self.base_to_build, config=self.config_name)
for arg in args]
description = self.msvs_settings.ConvertVSMacros(
description, config=self.config_name)
elif self.flavor == 'mac':
# |env| is an empty list on non-mac.
args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
description = gyp.xcode_emulation.ExpandEnvVars(description, env)
# TODO: we shouldn't need to qualify names; we do it because
# currently the ninja rule namespace is global, but it really
# should be scoped to the subninja.
rule_name = self.name
if self.toolset == 'target':
rule_name += '.' + self.toolset
rule_name += '.' + name
rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
# Remove variable references, but not if they refer to the magic rule
# variables. This is not quite right, as it also protects these for
# actions, not just for rules where they are valid. Good enough.
protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
description = re.sub(protect + r'\$', '_', description)
# gyp dictates that commands are run from the base directory.
# cd into the directory before running, and adjust paths in
# the arguments to point to the proper locations.
rspfile = None
rspfile_content = None
args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
if self.flavor == 'win':
rspfile = rule_name + '.$unique_name.rsp'
# The cygwin case handles this inside the bash sub-shell.
run_in = '' if is_cygwin else ' ' + self.build_to_base
if is_cygwin:
rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
args, self.build_to_base)
else:
rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
rspfile + run_in)
else:
env = self.ComputeExportEnvString(env)
command = gyp.common.EncodePOSIXShellList(args)
command = 'cd %s; ' % self.build_to_base + env + command
# GYP rules/actions express being no-ops by not touching their outputs.
# Avoid executing downstream dependencies in this case by specifying
# restat=1 to ninja.
self.ninja.rule(rule_name, command, description, restat=True,
rspfile=rspfile, rspfile_content=rspfile_content)
self.ninja.newline()
return rule_name, args
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
global generator_additional_non_configuration_keys
global generator_additional_path_sections
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Ninja generator.
import gyp.generator.xcode as xcode_generator
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
elif flavor == 'win':
default_variables.setdefault('OS', 'win')
default_variables['EXECUTABLE_SUFFIX'] = '.exe'
default_variables['STATIC_LIB_PREFIX'] = ''
default_variables['STATIC_LIB_SUFFIX'] = '.lib'
default_variables['SHARED_LIB_PREFIX'] = ''
default_variables['SHARED_LIB_SUFFIX'] = '.dll'
# Copy additional generator configuration data from VS, which is shared
# by the Windows Ninja generator.
import gyp.generator.msvs as msvs_generator
generator_additional_non_configuration_keys = getattr(msvs_generator,
'generator_additional_non_configuration_keys', [])
generator_additional_path_sections = getattr(msvs_generator,
'generator_additional_path_sections', [])
gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'lib'))
default_variables.setdefault('LIB_DIR',
os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
"""Returns the path from the toplevel_dir to the build output directory."""
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to ninja easier, ninja doesn't put anything here.
generator_dir = os.path.relpath(params['options'].generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
# Relative path from source root to our output files. e.g. "out"
return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
"""Called by __init__ to initialize generator values based on params."""
# E.g. "out/gypfiles"
toplevel = params['options'].toplevel_dir
qualified_out_dir = os.path.normpath(os.path.join(
toplevel, ComputeOutputDir(params), 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': toplevel,
'qualified_out_dir': qualified_out_dir,
}
def OpenOutput(path, mode='w'):
"""Open |path| for writing, creating directories if necessary."""
gyp.common.EnsureDirExists(path)
return open(path, mode)
def CommandWithWrapper(cmd, wrappers, prog):
wrapper = wrappers.get(cmd, '')
if wrapper:
return wrapper + ' ' + prog
return prog
def GetDefaultConcurrentLinks():
"""Returns a best-guess for a number of concurrent links."""
if sys.platform in ('win32', 'cygwin'):
import ctypes
class MEMORYSTATUSEX(ctypes.Structure):
_fields_ = [
("dwLength", ctypes.c_ulong),
("dwMemoryLoad", ctypes.c_ulong),
("ullTotalPhys", ctypes.c_ulonglong),
("ullAvailPhys", ctypes.c_ulonglong),
("ullTotalPageFile", ctypes.c_ulonglong),
("ullAvailPageFile", ctypes.c_ulonglong),
("ullTotalVirtual", ctypes.c_ulonglong),
("ullAvailVirtual", ctypes.c_ulonglong),
("sullAvailExtendedVirtual", ctypes.c_ulonglong),
]
stat = MEMORYSTATUSEX()
stat.dwLength = ctypes.sizeof(stat)
ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
mem_limit = max(1, stat.ullTotalPhys / (4 * (2 ** 30))) # total / 4GB
hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32)))
return min(mem_limit, hard_cap)
elif sys.platform.startswith('linux'):
if os.path.exists("/proc/meminfo"):
with open("/proc/meminfo") as meminfo:
memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
for line in meminfo:
match = memtotal_re.match(line)
if not match:
continue
# Allow 8Gb per link on Linux because Gold is quite memory hungry
return max(1, int(match.group(1)) / (8 * (2 ** 20)))
return 1
elif sys.platform == 'darwin':
try:
avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
# A static library debug build of Chromium's unit_tests takes ~2.7GB, so
# 4GB per ld process allows for some more bloat.
return max(1, avail_bytes / (4 * (2 ** 30))) # total / 4GB
except:
return 1
else:
# TODO(scottmg): Implement this for other platforms.
return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
"""Adds link rules for Windows platform to |master_ninja|."""
def FullLinkCommand(ldcmd, out, binary_type):
resource_name = {
'exe': '1',
'dll': '2',
}[binary_type]
return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
'%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
'$manifests' % {
'python': sys.executable,
'out': out,
'ldcmd': ldcmd,
'resname': resource_name,
'embed': embed_manifest }
rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
use_separate_mspdbsrv = (
int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo $implibflag /DLL /OUT:$binary '
'@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
master_ninja.rule('solink' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
master_ninja.rule('solink_module' + rule_name_suffix,
description=dlldesc, command=dllcmd,
rspfile='$binary.rsp',
rspfile_content='$libs $in_newline $ldflags',
restat=True,
pool='link_pool')
# Note that ldflags goes at the end so that it has the option of
# overriding default settings earlier in the command line.
exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
'$ld /nologo /OUT:$binary @$binary.rsp' %
(sys.executable, use_separate_mspdbsrv))
exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
master_ninja.rule('link' + rule_name_suffix,
description='LINK%s $binary' % rule_name_suffix.upper(),
command=exe_cmd,
rspfile='$binary.rsp',
rspfile_content='$in_newline $libs $ldflags',
pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(
os.path.join(ComputeOutputDir(params), config_name))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
# Put build-time support tools in out/{config_name}.
gyp.common.CopyTool(flavor, toplevel_build)
# Grab make settings for CC/CXX.
# The rules are
# - The priority from low to high is gcc/g++, the 'make_global_settings' in
# gyp, the environment variable.
# - If there is no 'make_global_settings' for CC.host/CXX.host or
# 'CC_host'/'CXX_host' enviroment variable, cc_host/cxx_host should be set
# to cc/cxx.
if flavor == 'win':
# Overridden by local arch choice in the use_deps case.
# Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
# build.ninja so needs something valid here. http://crbug.com/233985
cc = 'cl.exe'
cxx = 'cl.exe'
ld = 'link.exe'
ld_host = '$ld'
else:
cc = 'cc'
cxx = 'c++'
ld = '$cc'
ldxx = '$cxx'
ld_host = '$cc_host'
ldxx_host = '$cxx_host'
cc_host = None
cxx_host = None
cc_host_global_setting = None
cxx_host_global_setting = None
clang_cl = None
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings = data[build_file].get('make_global_settings', [])
build_to_root = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
wrappers = {}
for key, value in make_global_settings:
if key == 'CC':
cc = os.path.join(build_to_root, value)
if cc.endswith('clang-cl'):
clang_cl = cc
if key == 'CXX':
cxx = os.path.join(build_to_root, value)
if key == 'CC.host':
cc_host = os.path.join(build_to_root, value)
cc_host_global_setting = value
if key == 'CXX.host':
cxx_host = os.path.join(build_to_root, value)
cxx_host_global_setting = value
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
# Support wrappers from environment variables too.
for key, value in os.environ.iteritems():
if key.lower().endswith('_wrapper'):
key_prefix = key[:-len('_wrapper')]
key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
wrappers[key_prefix] = os.path.join(build_to_root, value)
if flavor == 'win':
cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
toplevel_build, generator_flags, OpenOutput)
for arch, path in cl_paths.iteritems():
if clang_cl:
# If we have selected clang-cl, use that instead.
path = clang_cl
command = CommandWithWrapper('CC', wrappers,
QuoteShellArgument(path, 'win'))
if clang_cl:
# Use clang-cl to cross-compile for x86 or x86_64.
command += (' -m32' if arch == 'x86' else ' -m64')
master_ninja.variable('cl_' + arch, command)
cc = GetEnvironFallback(['CC_target', 'CC'], cc)
master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
if flavor == 'win':
master_ninja.variable('ld', ld)
master_ninja.variable('idl', 'midl.exe')
master_ninja.variable('ar', 'lib.exe')
master_ninja.variable('rc', 'rc.exe')
master_ninja.variable('asm', 'ml.exe')
master_ninja.variable('mt', 'mt.exe')
else:
master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
if generator_supports_multiple_toolsets:
if not cc_host:
cc_host = cc
if not cxx_host:
cxx_host = cxx
master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
cc_host = GetEnvironFallback(['CC_host'], cc_host)
cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
# The environment variable could be used in 'make_global_settings', like
# ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
if '$(CC)' in cc_host and cc_host_global_setting:
cc_host = cc_host_global_setting.replace('$(CC)', cc)
if '$(CXX)' in cxx_host and cxx_host_global_setting:
cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
master_ninja.variable('cc_host',
CommandWithWrapper('CC.host', wrappers, cc_host))
master_ninja.variable('cxx_host',
CommandWithWrapper('CXX.host', wrappers, cxx_host))
if flavor == 'win':
master_ninja.variable('ld_host', ld_host)
else:
master_ninja.variable('ld_host', CommandWithWrapper(
'LINK', wrappers, ld_host))
master_ninja.variable('ldxx_host', CommandWithWrapper(
'LINK', wrappers, ldxx_host))
master_ninja.newline()
master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
master_ninja.newline()
deps = 'msvc' if flavor == 'win' else 'gcc'
if flavor != 'win':
master_ninja.rule(
'cc',
description='CC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'cc_s',
description='CC $out',
command=('$cc $defines $includes $cflags $cflags_c '
'$cflags_pch_c -c $in -o $out'))
master_ninja.rule(
'cxx',
description='CXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
'$cflags_pch_cc -c $in -o $out'),
depfile='$out.d',
deps=deps)
else:
# TODO(scottmg) Separate pdb names is a test to see if it works around
# http://crbug.com/142362. It seems there's a race between the creation of
# the .pdb by the precompiled header step for .cc and the compilation of
# .c files. This should be handled by mspdbsrv, but rarely errors out with
# c1xx : fatal error C1033: cannot open program database
# By making the rules target separate pdb files this might be avoided.
cc_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cc /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
cxx_command = ('ninja -t msvc -e $arch ' +
'-- '
'$cxx /nologo /showIncludes /FC '
'@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
master_ninja.rule(
'cc',
description='CC $out',
command=cc_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_c',
deps=deps)
master_ninja.rule(
'cxx',
description='CXX $out',
command=cxx_command,
rspfile='$out.rsp',
rspfile_content='$defines $includes $cflags $cflags_cc',
deps=deps)
master_ninja.rule(
'idl',
description='IDL $in',
command=('%s gyp-win-tool midl-wrapper $arch $outdir '
'$tlb $h $dlldata $iid $proxy $in '
'$idlflags' % sys.executable))
master_ninja.rule(
'rc',
description='RC $in',
# Note: $in must be last otherwise rc.exe complains.
command=('%s gyp-win-tool rc-wrapper '
'$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
sys.executable))
master_ninja.rule(
'asm',
description='ASM $in',
command=('%s gyp-win-tool asm-wrapper '
'$arch $asm $defines $includes /c /Fo $out $in' %
sys.executable))
if flavor != 'mac' and flavor != 'win':
master_ninja.rule(
'alink',
description='AR $out',
command='rm -f $out && $ar rcs $out $in')
master_ninja.rule(
'alink_thin',
description='AR $out',
command='rm -f $out && $ar rcsT $out $in')
# This allows targets that only need to depend on $lib's API to declare an
# order-only dependency on $lib.TOC and avoid relinking such downstream
# dependencies when $lib changes only in non-public ways.
# The resulting string leaves an uninterpolated %{suffix} which
# is used in the final substitution below.
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
'fi; fi'
% { 'solink':
'$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
'extract_toc':
('{ readelf -d $lib | grep SONAME ; '
'nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
master_ninja.rule(
'solink',
description='SOLINK $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib',
restat=True,
command=(mtime_preserving_solink_base % {
'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
'$libs'}),
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out',
command=('$ld $ldflags -o $out '
'-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
pool='link_pool')
elif flavor == 'win':
master_ninja.rule(
'alink',
description='LIB $out',
command=('%s gyp-win-tool link-wrapper $arch False '
'$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
sys.executable),
rspfile='$out.rsp',
rspfile_content='$in_newline $libflags')
_AddWinLinkRules(master_ninja, embed_manifest=True)
_AddWinLinkRules(master_ninja, embed_manifest=False)
else:
master_ninja.rule(
'objc',
description='OBJC $out',
command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
'$cflags_pch_objc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'objcxx',
description='OBJCXX $out',
command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
'$cflags_pch_objcc -c $in -o $out'),
depfile='$out.d',
deps=deps)
master_ninja.rule(
'alink',
description='LIBTOOL-STATIC $out, POSTBUILDS',
command='rm -f $out && '
'./gyp-mac-tool filter-libtool libtool $libtool_flags '
'-static -o $out $in'
'$postbuilds')
master_ninja.rule(
'lipo',
description='LIPO $out, POSTBUILDS',
command='rm -f $out && lipo -create $in -output $out$postbuilds')
# Record the public interface of $lib in $lib.TOC. See the corresponding
# comment in the posix section above for details.
solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
mtime_preserving_solink_base = (
'if [ ! -e $lib -o ! -e $lib.TOC ] || '
# Always force dependent targets to relink if this library
# reexports something. Handling this correctly would require
# recursive TOC dumping but this is rare in practice, so punt.
'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
'%(solink)s && %(extract_toc)s > $lib.TOC; '
'else '
'%(solink)s && %(extract_toc)s > $lib.tmp && '
'if ! cmp -s $lib.tmp $lib.TOC; then '
'mv $lib.tmp $lib.TOC ; '
'fi; '
'fi'
% { 'solink': solink_base,
'extract_toc':
'{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
solink_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_suffix,
'type': '-shared'},
pool='link_pool')
master_ninja.rule(
'solink_notoc',
description='SOLINK $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
pool='link_pool')
solink_module_suffix = '$in $solibs $libs$postbuilds'
master_ninja.rule(
'solink_module',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=mtime_preserving_solink_base % {'suffix': solink_module_suffix,
'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'solink_module_notoc',
description='SOLINK(module) $lib, POSTBUILDS',
restat=True,
command=solink_base % {'suffix': solink_module_suffix, 'type': '-bundle'},
pool='link_pool')
master_ninja.rule(
'link',
description='LINK $out, POSTBUILDS',
command=('$ld $ldflags -o $out '
'$in $solibs $libs$postbuilds'),
pool='link_pool')
master_ninja.rule(
'preprocess_infoplist',
description='PREPROCESS INFOPLIST $out',
command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
'plutil -convert xml1 $out $out'))
master_ninja.rule(
'copy_infoplist',
description='COPY INFOPLIST $in',
command='$env ./gyp-mac-tool copy-info-plist $in $out $keys')
master_ninja.rule(
'mac_tool',
description='MACTOOL $mactool_cmd $in',
command='$env ./gyp-mac-tool $mactool_cmd $in $out')
master_ninja.rule(
'package_framework',
description='PACKAGE FRAMEWORK $out, POSTBUILDS',
command='./gyp-mac-tool package-framework $out $version$postbuilds '
'&& touch $out')
if flavor == 'win':
master_ninja.rule(
'stamp',
description='STAMP $out',
command='%s gyp-win-tool stamp $out' % sys.executable)
master_ninja.rule(
'copy',
description='COPY $in $out',
command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
else:
master_ninja.rule(
'stamp',
description='STAMP $out',
command='${postbuilds}touch $out')
master_ninja.rule(
'copy',
description='COPY $in $out',
command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
master_ninja.newline()
all_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_targets.add(target)
all_outputs = set()
# target_outputs is a map from qualified target name to a Target object.
target_outputs = {}
# target_short_names is a map from target short name to a list of Target
# objects.
target_short_names = {}
for qualified_target in target_list:
# qualified_target is like: third_party/icu/icu.gyp:icui18n#target
build_file, name, toolset = \
gyp.common.ParseQualifiedTarget(qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
spec = target_dicts[qualified_target]
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
base_path = os.path.dirname(build_file)
obj = 'obj'
if toolset != 'target':
obj += '.' + toolset
output_file = os.path.join(obj, base_path, name + '.ninja')
ninja_output = StringIO()
writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
ninja_output,
toplevel_build, output_file,
flavor, toplevel_dir=options.toplevel_dir)
target = writer.WriteSpec(spec, config_name, generator_flags)
if ninja_output.tell() > 0:
# Only create files for ninja files that actually have contents.
with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
ninja_file.write(ninja_output.getvalue())
ninja_output.close()
master_ninja.subninja(output_file)
if target:
if name != target.FinalOutput() and spec['toolset'] == 'target':
target_short_names.setdefault(name, []).append(target)
target_outputs[qualified_target] = target
if qualified_target in all_targets:
all_outputs.add(target.FinalOutput())
if target_short_names:
# Write a short name to build this target. This benefits both the
# "build chrome" case as well as the gyp tests, which expect to be
# able to run actions and build libraries by their short name.
master_ninja.newline()
master_ninja.comment('Short names for targets.')
for short_name in target_short_names:
master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
target_short_names[short_name]])
if all_outputs:
master_ninja.newline()
master_ninja.build('all', 'phony', list(all_outputs))
master_ninja.default(generator_flags.get('default_target', 'all'))
master_ninja_file.close()
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
builddir = os.path.join(options.toplevel_dir, 'out', config)
arguments = ['ninja', '-C', builddir]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
(target_list, target_dicts, data, params, config_name) = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
# Update target_dicts for iOS device builds.
target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
target_dicts)
user_config = params.get('generator_flags', {}).get('config', None)
if gyp.common.GetFlavor(params) == 'win':
target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data, params,
user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append(
(target_list, target_dicts, data, params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data, params,
config_name)
|
gpl-3.0
|
kiall/designate-py3
|
designate/storage/impl_sqlalchemy/migrate_repo/versions/066_add_update_status_index.py
|
8
|
1469
|
# Copyright (c) 2015 Rackspace Inc.
#
# Author: Tim Simmons <tim.simmons@rackspace.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Index, MetaData, Table
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
meta = MetaData()
def index_exists(index):
table = index[1]._get_table()
cols = sorted([str(x).split('.')[1] for x in index[1:]])
for idx in table.indexes:
if sorted(idx.columns.keys()) == cols:
return True
return False
def upgrade(migrate_engine):
meta.bind = migrate_engine
records_table = Table('records', meta, autoload=True)
indices = [
['update_status_index', records_table.c.status,
records_table.c.domain_id, records_table.c.tenant_id,
records_table.c.created_at, records_table.c.serial]
]
for ind in indices:
if not index_exists(ind):
index = Index(*ind)
index.create(migrate_engine)
|
apache-2.0
|
all-of-us/raw-data-repository
|
tests/api_tests/test_ppi_data_check_api.py
|
1
|
2788
|
from rdr_service.model.code import CodeType
from tests.helpers.unittest_base import BaseTestCase
class CheckPpiDataApiTest(BaseTestCase):
def setUp(self):
super(CheckPpiDataApiTest, self).setUp(with_consent_codes=True)
self.participant_summary = self.data_generator.create_database_participant_summary(email='test@example.com')
questions_and_answers = [
('first_question_code', 'first_answer_code'),
('Second_CODE', 'ANOTHER_ANSWER'),
('LAST_CODE', 'Final_Answer|with_additional_option')
]
questionnaire = self.data_generator.create_database_questionnaire_history()
for question_code_value, _ in questions_and_answers:
question_code = self.data_generator.create_database_code(
value=question_code_value,
codeType=CodeType.QUESTION
)
self.data_generator.create_database_questionnaire_question(
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version,
codeId=question_code.codeId
)
questionnaire_response = self.data_generator.create_database_questionnaire_response(
participantId=self.participant_summary.participantId,
questionnaireId=questionnaire.questionnaireId,
questionnaireVersion=questionnaire.version
)
for question_index, (_, answer_code_values) in enumerate(questions_and_answers):
question = questionnaire.questions[question_index]
for answer_value in answer_code_values.split('|'):
answer_code = self.data_generator.create_database_code(value=answer_value)
self.data_generator.create_database_questionnaire_response_answer(
questionnaireResponseId=questionnaire_response.questionnaireResponseId,
questionId=question.questionnaireQuestionId,
valueCodeId=answer_code.codeId
)
def test_case_insensitive_answer_code_matching(self):
"""Make sure case doesn't matter when matching answer codes against what the server has"""
ppi_check_payload = {
'ppi_data': {
self.participant_summary.email: {
'fIrSt_QuEsTiOn_CoDe': 'First_Answer_Code',
'SECOND_CODE': 'another_answer',
'last_code': 'Final_ANSWER|WITH_ADDITIONAL_OPTION'
}
}
}
response = self.send_post('CheckPpiData', ppi_check_payload)
response_error_count = response['ppi_results']['test@example.com']['errors_count']
self.assertEqual(0, response_error_count, 'Differences in case should not cause errors')
|
bsd-3-clause
|
rosemead/namebench
|
nb_third_party/dns/rdtypes/txtbase.py
|
248
|
2986
|
# Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""TXT-like base class."""
import dns.exception
import dns.rdata
import dns.tokenizer
class TXTBase(dns.rdata.Rdata):
"""Base class for rdata that is like a TXT record
@ivar strings: the text strings
@type strings: list of string
@see: RFC 1035"""
__slots__ = ['strings']
def __init__(self, rdclass, rdtype, strings):
super(TXTBase, self).__init__(rdclass, rdtype)
if isinstance(strings, str):
strings = [ strings ]
self.strings = strings[:]
def to_text(self, origin=None, relativize=True, **kw):
txt = ''
prefix = ''
for s in self.strings:
txt += '%s"%s"' % (prefix, dns.rdata._escapify(s))
prefix = ' '
return txt
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
strings = []
while 1:
token = tok.get().unescape()
if token.is_eol_or_eof():
break
if not (token.is_quoted_string() or token.is_identifier()):
raise dns.exception.SyntaxError("expected a string")
if len(token.value) > 255:
raise dns.exception.SyntaxError("string too long")
strings.append(token.value)
if len(strings) == 0:
raise dns.exception.UnexpectedEnd
return cls(rdclass, rdtype, strings)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
for s in self.strings:
l = len(s)
assert l < 256
byte = chr(l)
file.write(byte)
file.write(s)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
strings = []
while rdlen > 0:
l = ord(wire[current])
current += 1
rdlen -= 1
if l > rdlen:
raise dns.exception.FormError
s = wire[current : current + l]
current += l
rdlen -= l
strings.append(s)
return cls(rdclass, rdtype, strings)
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.strings, other.strings)
|
apache-2.0
|
CanalTP/navitia
|
source/log_analyzer/log_analyzer.py
|
2
|
7813
|
#! /usr/bin/python3
import plotly
from sortedcontainers import SortedKeyList
import re, os, argparse
kraken_regex = re.compile(r".* - Api : (.+), worker : (.+), request : (.+), start : (\d+), end : (\d+)")
jormun_regex = re.compile(r".*Task : (.+), request : (.+), start : (.+), end : (.+)")
generate_id_regex = re.compile(r".*Generating id : (.+) for request : (.+)")
request_regex = re.compile(r"journeys_(\w+)#(\w+)#(\w*)")
class KrakenTask:
def __init__(self, api, sub_request_id, worker, start, end):
self.api = api
self.sub_request_id = sub_request_id
self.worker = worker
self.start = int(start)
self.end = int(end)
class JormunTask:
def __init__(self, task_name, sub_request_id, start, end):
self.task_name = task_name
self.sub_request_id = sub_request_id
self.start = int(start)
self.end = int(end)
class Request:
def __init__(self, request_id, scenario):
self.request_id = request_id
self.scenario = scenario
self.kraken_tasks_by_worker = {} # dict worker -> list of kraken tasks
# list of JormunTask, sorted to have first tasks with the earliest start time
# and between two tasks with the same start time, the one with the latest end time comes first
self.jormun_tasks = SortedKeyList([], key=lambda task: (task.start, -task.end))
self.url = ""
def add_kraken_task(self, api, sub_request_id, worker, start, end):
worker_tasks = self.kraken_tasks_by_worker.get(worker, [])
worker_tasks.append(KrakenTask(api, sub_request_id, worker, start, end))
self.kraken_tasks_by_worker[worker] = worker_tasks
def add_jormun_task(self, task_name, sub_request_id, start, end):
self.jormun_tasks.add(JormunTask(task_name, sub_request_id, start, end))
def set_url(self, url):
self.url = url
def myprint(self):
print("Request : " + self.request_id + " scenario : " + self.scenario + " url : " + self.url)
for worker, tasks in self.kraken_tasks_by_worker.items():
print(" Worker : " + worker)
for task in tasks:
print(
" Api : " + task.api + " sub_request : " + task.sub_request_id + " start : ",
task.start,
" end : ",
task.end,
)
print(" Jormun : ")
for task in self.jormun_tasks:
print(
" Task : " + task.task_name + " sub_request : " + task.sub_request_id + " start : ",
task.start,
" end : ",
task.end,
)
def create_gantt(self, output_dir):
# https://plotly.com/python/gantt/
import plotly.figure_factory
def make_jormun_dict(task):
return dict(
Task="J{}#{}".format(task.sub_request_id, task.task_name),
Start=task.start,
Finish=task.end,
SubRequest="J{}#{}".format(task.sub_request_id, task.task_name),
)
def make_kraken_dict(task):
return dict(
Task="Kraken_{}".format(task.worker),
Start=task.start,
Finish=task.end,
SubRequest="K{}".format(task.sub_request_id),
)
df = [make_jormun_dict(task) for task in self.jormun_tasks]
df.extend([make_kraken_dict(task) for tasks in self.kraken_tasks_by_worker.values() for task in tasks])
# if the dict is empty, there is nothing to plot
if not df:
print("Nothing to plot for request_id : ", self.request_id)
return
import colorsys
nb_of_colors = len(df)
HSV_tuples = [(x * 1.0 / nb_of_colors, 0.5, 0.5) for x in range(nb_of_colors)]
RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
fig = plotly.figure_factory.create_gantt(
df, index_col='SubRequest', group_tasks=True, colors=RGB_tuples, show_colorbar=True, title=self.url
)
fig.update_layout(hoverlabel=dict(bgcolor="white", font_size=16, font_family="Rockwell", namelength=-1))
html_str = plotly.io.to_html(fig)
html_filename = "{}_{}.html".format(self.request_id, self.scenario)
filepath = os.path.join(output_dir, html_filename)
os.makedirs(output_dir, exist_ok=True)
with open(filepath, 'w') as html_file:
html_file.write(html_str)
print("Created ", filepath)
# fig.show(renderer="png")
def run(kraken_log_filepath, jormun_log_filepath, output_dir):
requests = {} # dict (request_id, scenario) -> Request
with open(kraken_log_filepath) as kraken_log_file:
for l in kraken_log_file.readlines():
match = kraken_regex.match(l)
if match:
api = match.group(1)
worker = match.group(2)
kraken_request_id = match.group(3)
start = match.group(4)
end = match.group(5)
# print("Api : " + api + " worker : " + worker + " request : " + kraken_request_id + " start : " + start + " end : " + end)
request_match = request_regex.match(kraken_request_id)
if request_match:
request_id = request_match.group(1)
scenario = request_match.group(2)
sub_request_id = request_match.group(3)
# print("request_id : " + request_id + " scenario : " + scenario + " sub_request : " + sub_request_id)
request = requests.get((request_id, scenario), Request(request_id, scenario))
request.add_kraken_task(api, sub_request_id, worker, start, end)
requests[(request_id, scenario)] = request
with open(jormun_log_filepath) as jormun_log_file:
for l in jormun_log_file.readlines():
match = jormun_regex.match(l)
if match:
task_name = match.group(1)
kraken_request_id = match.group(2)
start = match.group(3)
end = match.group(4)
request_match = request_regex.match(kraken_request_id)
if request_match:
request_id = request_match.group(1)
scenario = request_match.group(2)
sub_request_id = request_match.group(3)
request = requests.get((request_id, scenario), Request(request_id, scenario))
request.add_jormun_task(task_name, sub_request_id, start, end)
requests[(request_id, scenario)] = request
match = generate_id_regex.match(l)
if match:
kraken_request_id = match.group(1)
url = match.group(2)
request_match = request_regex.match(kraken_request_id)
if request_match:
request_id = request_match.group(1)
scenario = request_match.group(2)
request = requests.get((request_id, scenario), Request(request_id, scenario))
request.set_url(url)
requests[(request_id, scenario)] = request
for request in requests.values():
# request.myprint()
request.create_gantt(output_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--kraken_log_file", required=True, nargs=1)
parser.add_argument("--jormun_log_file", required=True, nargs=1)
parser.add_argument("--output_dir", required=True, nargs=1)
args = parser.parse_args()
run(args.kraken_log_file[0], args.jormun_log_file[0], args.output_dir[0])
if __name__ == '__main__':
main()
|
agpl-3.0
|
wooga/airflow
|
airflow/example_dags/example_nested_branch_dag.py
|
1
|
2019
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example DAG demonstrating a workflow with nested branching. The join tasks are created with
``none_failed_or_skipped`` trigger rule such that they are skipped whenever their corresponding
``BranchPythonOperator`` are skipped.
"""
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.utils.dates import days_ago
with DAG(dag_id="example_nested_branch_dag", start_date=days_ago(2), schedule_interval="@daily") as dag:
branch_1 = BranchPythonOperator(task_id="branch_1", python_callable=lambda: "true_1")
join_1 = DummyOperator(task_id="join_1", trigger_rule="none_failed_or_skipped")
true_1 = DummyOperator(task_id="true_1")
false_1 = DummyOperator(task_id="false_1")
branch_2 = BranchPythonOperator(task_id="branch_2", python_callable=lambda: "true_2")
join_2 = DummyOperator(task_id="join_2", trigger_rule="none_failed_or_skipped")
true_2 = DummyOperator(task_id="true_2")
false_2 = DummyOperator(task_id="false_2")
false_3 = DummyOperator(task_id="false_3")
branch_1 >> true_1 >> join_1
branch_1 >> false_1 >> branch_2 >> [true_2, false_2] >> join_2 >> false_3 >> join_1
|
apache-2.0
|
mxOBS/deb-pkg_xbmc-imx6
|
addons/service.xbmc.versioncheck/service.py
|
17
|
4169
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import platform
import xbmc
import lib.common
from lib.common import log, dialog_yesno
from lib.common import upgrade_message as _upgrademessage
from lib.common import upgrade_message2 as _upgrademessage2
__addon__ = lib.common.__addon__
__addonversion__ = lib.common.__addonversion__
__addonname__ = lib.common.__addonname__
__addonpath__ = lib.common.__addonpath__
__icon__ = lib.common.__icon__
oldversion = False
class Main:
def __init__(self):
linux = False
packages = []
xbmc.sleep(5000)
if xbmc.getCondVisibility('System.Platform.Linux') and __addon__.getSetting("upgrade_apt") == 'true':
packages = ['xbmc']
_versionchecklinux(packages)
# temporary don't notify Windows untill crashing has been solved
elif xbmc.getCondVisibility('System.Platform.Windows'):
pass
else:
oldversion, version_installed, version_available, version_stable = _versioncheck()
if oldversion:
_upgrademessage2( version_installed, version_available, version_stable, oldversion, False)
def _versioncheck():
# initial vars
from lib.jsoninterface import get_installedversion, get_versionfilelist
from lib.versions import compare_version
# retrieve versionlists from supplied version file
versionlist = get_versionfilelist()
# retrieve version installed
version_installed = get_installedversion()
# copmpare installed and available
oldversion, version_installed, version_available, version_stable = compare_version(version_installed, versionlist)
return oldversion, version_installed, version_available, version_stable
def _versionchecklinux(packages):
if platform.dist()[0].lower() in ['ubuntu', 'debian', 'linuxmint']:
handler = False
result = False
try:
# try aptdeamon first
from lib.aptdeamonhandler import AptdeamonHandler
handler = AptdeamonHandler()
except:
# fallback to shell
# since we need the user password, ask to check for new version first
from lib.shellhandlerapt import ShellHandlerApt
sudo = True
handler = ShellHandlerApt(sudo)
if dialog_yesno(32015):
pass
elif dialog_yesno(32009, 32010):
log("disabling addon by user request")
__addon__.setSetting("versioncheck_enable", 'false')
return
if handler:
if handler.check_upgrade_available(packages[0]):
if _upgrademessage(32012, oldversion, True):
if __addon__.getSetting("upgrade_system") == "false":
result = handler.upgrade_package(packages[0])
else:
result = handler.upgrade_system()
if result:
from lib.common import message_upgrade_success, message_restart
message_upgrade_success()
message_restart()
else:
log("Error during upgrade")
else:
log("Error: no handler found")
else:
log("Unsupported platform %s" %platform.dist()[0])
sys.exit(0)
if (__name__ == "__main__"):
log('Version %s started' % __addonversion__)
Main()
|
gpl-2.0
|
dendisuhubdy/tensorflow
|
tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py
|
61
|
15941
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_ops.sparse_tensor_dense_matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import test
def _maybe_complex(x):
if x.dtype.kind == "c": # complex
return (x + 1j * x) / 2
return x
class SparseTensorDenseMatMulTest(test.TestCase):
def _testMatmul(self,
x,
y,
adjoint_a=False,
adjoint_b=False,
indices_dtype=np.int64):
x_mat = np.matrix(x)
if adjoint_a:
x_mat = x_mat.H
y_mat = np.matrix(y)
if adjoint_b:
y_mat = y_mat.H
np_ans = x_mat * y_mat
x_indices = np.vstack(np.where(x)).astype(indices_dtype).T
x_values = x[np.where(x)]
x_shape = x.shape
with self.test_session(use_gpu=True):
sp_x_value = sparse_tensor.SparseTensorValue(
indices=x_indices, values=x_values, dense_shape=x_shape)
tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(
sp_x_value, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
tf_tensor_ans = sparse_ops.sparse_tensor_dense_matmul(
sparse_tensor.SparseTensor.from_value(sp_x_value),
y,
adjoint_a=adjoint_a,
adjoint_b=adjoint_b)
# Ensure that the RHS shape is known at least.
self.assertEqual(tf_value_ans.get_shape()[1], np_ans.shape[1])
self.assertEqual(tf_tensor_ans.get_shape()[1], np_ans.shape[1])
for out in (tf_value_ans.eval(), tf_tensor_ans.eval()):
if x.dtype == np.float32:
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)
elif x.dtype == np.float64:
self.assertAllClose(np_ans, out, rtol=1e-6, atol=1e-6)
else:
self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)
def _testBasic(self, value_dtype, indices_dtype=np.int64):
x = _maybe_complex(np.random.rand(10, 10).astype(value_dtype))
x[np.abs(x) < 0.5] = 0 # Make it sparse
y = _maybe_complex(np.random.randn(10, 20).astype(value_dtype))
self._testMatmul(x, y, indices_dtype=indices_dtype)
def testBasic(self):
np.random.seed(127) # Repeatable results
self._testBasic(np.int32)
self._testBasic(np.float32)
self._testBasic(np.float64)
self._testBasic(np.complex64)
self._testBasic(np.complex128)
self._testBasic(np.int32, indices_dtype=np.int32)
self._testBasic(np.float32, indices_dtype=np.int32)
def testShapeInference(self):
x = np.random.rand(10, 10)
x[np.abs(x) < 0.5] = 0 # Make it sparse
y = np.random.randn(10, 20)
x_indices = np.vstack(np.where(x)).astype(np.int64).T
x_values = x[np.where(x)]
x_shape = x.shape
x_st = sparse_tensor.SparseTensor(x_indices, x_values, x_shape)
result = sparse_ops.sparse_tensor_dense_matmul(x_st, y)
self.assertEqual(result.get_shape(), (10, 20))
x_shape_unknown = array_ops.placeholder(dtype=dtypes.int64, shape=None)
x_st_shape_unknown = sparse_tensor.SparseTensor(x_indices, x_values,
x_shape_unknown)
result_left_shape_unknown = sparse_ops.sparse_tensor_dense_matmul(
x_st_shape_unknown, y)
self.assertEqual(result_left_shape_unknown.get_shape().as_list(),
[None, 20])
x_shape_inconsistent = [10, 15]
x_st_shape_inconsistent = sparse_tensor.SparseTensor(x_indices, x_values,
x_shape_inconsistent)
with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
sparse_ops.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)
def testInvalidIndicesForSparseTensorDenseMatmul(self):
# Note: use_gpu=False because nice errors are only returned from CPU kernel.
with self.test_session(use_gpu=False):
indices = np.matrix([[1, 10]]).astype(np.int64)
values = np.array([10]).astype(np.float32)
shape = [3, 2]
sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
# Test multiplying by both a small and large dense matrix, to hit
# both cases in the kernel.
dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
with self.assertRaisesOpError(
"k .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t).eval()
dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
with self.assertRaisesOpError(
"k .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t).eval()
# Repeat with adjoint_a, to get a different error.
dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
with self.assertRaisesOpError(
"m .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval()
dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
with self.assertRaisesOpError(
"m .10. from index.0,1. out of bounds .>=2."):
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval()
def testInvalidIndicesForSparseTensorDenseMatmulOnGPU(self):
# Note: use_gpu=False because nice errors are only returned from CPU kerne
if not test.is_gpu_available():
return
with self.test_session(use_gpu=True):
indices = np.array([[1, 10]]).astype(np.int64)
values = np.array([10]).astype(np.float32)
shape = [3, 2]
sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
# Test multiplying by both a small and large dense matrix, to hit
# both cases in the kernel.
dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
expected_t = np.array([[0] * 5, [np.nan] * 5, [0] * 5], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t).eval())
dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
expected_t = np.array(
[[0] * 500, [np.nan] * 500, [0] * 500], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t).eval())
# Repeat with adjoint_a, now the error is that the sparse index
# is OOO w.r.t. the output. The GPU kernel can't do much here,
# so it just doesn't accumulate.
dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
expected_t = np.array([[0] * 5, [0] * 5], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval())
dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
expected_t = np.array([[0] * 500, [0] * 500], dtype=np.float32)
self.assertAllClose(expected_t,
sparse_ops.sparse_tensor_dense_matmul(
sparse_t, dense_t, adjoint_a=True).eval())
# Tests setting one dimension to be a high value.
def _testLarge(self, np_dtype):
r1 = np.random.randint(6000, 20000)
r2 = np.random.randint(1, 10)
r3 = np.random.randint(1, 10)
for m, k, n in [(r1, r2, r3),
(r2, r1, r3),
(r2, r3, r1)]:
x = _maybe_complex(np.random.rand(m, k).astype(np_dtype))
x[np.abs(x) < 0.8] = 0
y = _maybe_complex(np.random.randn(k, n).astype(np_dtype))
self._testMatmul(x, y, adjoint_a=False, adjoint_b=False)
self._testMatmul(x.transpose(), y, adjoint_a=True, adjoint_b=False)
self._testMatmul(x, y.transpose(), adjoint_a=False, adjoint_b=True)
self._testMatmul(
x.transpose(), y.transpose(), adjoint_a=True, adjoint_b=True)
np.random.seed(127) # Repeatable results
self._testLarge(np.float32)
self._testLarge(np.float64)
self._testLarge(np.complex64)
self._testLarge(np.complex128)
# Tests random sized matrices.
def testFloatRandom(self):
np.random.seed(127) # Repeatable results
for _ in range(8):
for adjoint_a in [True, False]:
for adjoint_b in [True, False]:
for thresh in [0.0, 0.2, 0.8, 1.0]:
n, k, m = np.random.randint(1, 100, size=3)
x = np.random.rand(n, k).astype(np.float32)
x[x < thresh] = 0 # Make it sparse
y = np.random.randn(k, m).astype(np.float32)
x = x.transpose() if adjoint_a else x
y = y.transpose() if adjoint_b else y
self._testMatmul(x, y, adjoint_a, adjoint_b)
def _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(x, y, adjoint_a,
adjoint_b):
def body(t, prev):
with ops.control_dependencies([prev]):
return (t + 1, math_ops.matmul(
x,
y,
transpose_a=adjoint_a,
transpose_b=adjoint_b,
a_is_sparse=True,
b_is_sparse=False))
t0 = constant_op.constant(0)
v0 = constant_op.constant(0.0)
def _timeit(iterations, _):
(_, final) = control_flow_ops.while_loop(
lambda t, _: t < iterations,
body, (t0, v0),
parallel_iterations=1,
back_prop=False,
shape_invariants=(tensor_shape.TensorShape(()),
tensor_shape.TensorShape(None)))
return [final]
return _timeit
def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(x_ind, x_val, x_shape,
y, adjoint_a,
adjoint_b):
sp_x = sparse_tensor.SparseTensor(
indices=x_ind, values=x_val, dense_shape=x_shape)
def body(t, prev):
with ops.control_dependencies([prev]):
return (t + 1, sparse_ops.sparse_tensor_dense_matmul(
sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))
t0 = constant_op.constant(0)
v0 = constant_op.constant(0.0)
def _timeit(iterations, _):
(_, final) = control_flow_ops.while_loop(
lambda t, _: t < iterations,
body, (t0, v0),
parallel_iterations=1,
back_prop=False,
shape_invariants=(tensor_shape.TensorShape(()),
tensor_shape.TensorShape(None)))
return [final]
return _timeit
def sparse_tensor_dense_vs_dense_matmul_benchmark(thresh,
m,
k,
n,
adjoint_a,
adjoint_b,
use_gpu,
skip_dense=False):
config = config_pb2.ConfigProto()
config.allow_soft_placement = True
# Configurable for benchmarking:
# config.intra_op_parallelism_threads = 100
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
np.random.seed([6, 117]) # Reproducibility
x = np.random.rand(m, k).astype(np.float32)
x[x < thresh] = 0
y = np.random.randn(k, n).astype(np.float32)
if adjoint_a:
x = x.T
if adjoint_b:
y = y.T
def _timer(sess, ops_fn, iterations):
# Warm in
sess.run(ops_fn(10, sess))
# Timing run
start = time.time()
sess.run(ops_fn(iterations, sess))
end = time.time()
return (end - start) / (1.0 * iterations) # Average runtime per iteration
# Using regular matmul, marking one of the matrices as dense.
if skip_dense:
delta_dense = float("nan")
else:
with session.Session(config=config, graph=ops.Graph()) as sess:
if not use_gpu:
with ops.device("/cpu:0"):
x_t = constant_op.constant(x)
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
x_t, y_t, adjoint_a, adjoint_b)
else:
with ops.device("/device:GPU:0"):
x_t = constant_op.constant(x)
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
x_t, y_t, adjoint_a, adjoint_b)
delta_dense = _timer(sess, ops_fn, 200)
# Using sparse_tensor_dense_matmul.
with session.Session("", config=config, graph=ops.Graph()) as sess:
if not use_gpu:
with ops.device("/cpu:0"):
x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
x_val = constant_op.constant(x[np.where(x)])
x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
else:
with ops.device("/device:GPU:0"):
x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
x_val = constant_op.constant(x[np.where(x)])
x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
y_t = constant_op.constant(y)
ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
delta_sparse = _timer(sess, ops_fn, 200)
print("%g \t %d \t %s \t %d \t %d \t %g \t %g \t %g" %
(1 - thresh, n, use_gpu, m, k, delta_dense, delta_sparse,
delta_sparse / delta_dense))
def main(_):
print("DenseDense MatMul (w/ Sparse Flag) vs. SparseTensorDense MatMul")
print("Matrix sizes:")
print(" A sparse [m, k] with % nonzero values between 1% and 80%")
print(" B dense [k, n]")
print("")
print("% nnz \t n \t gpu \t m \t k \t dt(dense) \t dt(sparse) "
"\t dt(sparse)/dt(dense)")
for thresh in (0.99, 0.8, 0.5, 0.2):
for n in (50, 100):
for use_gpu in (True, False):
for m in (100, 1000):
for k in (100, 1000):
sparse_tensor_dense_vs_dense_matmul_benchmark(
thresh, m, k, n, False, False, use_gpu=use_gpu)
# Enable for large scale benchmarks, these ones take a long time to run.
#
# for use_gpu in (True, False):
# sparse_tensor_dense_vs_dense_matmul_benchmark(
# thresh=0.99, m=1000000, k=1000, n=100, adjoint_a=False,
# adjoint_b=False, use_gpu=use_gpu, skip_dense=True)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
|
apache-2.0
|
mojolab/LivingData
|
lib/livdatops.py
|
1
|
1153
|
import pandas
def getColRenameDict(mergersheet,sheet):
colrenamedict={}
originalcolnames=mergersheet[sheet].fillna("NA")
newcolnames=mergersheet[mergersheet.columns[0]]
for i in range(0,len(originalcolnames)):
colrenamedict[originalcolnames[i]]=newcolnames[i]
# if originalcolnames[i]!="NA":
# colrenamedict[originalcolnames[i]]=newcolnames[i]
return colrenamedict
def createMergedDFList(dflist,mergersheetname):
altereddfs={}
for sheet,matrix in dflist.iteritems():
if sheet == mergersheetname:
altereddfs[sheet]=matrix
mergersheet=matrix
else:
df=matrix
print df.columns
columnrenamedict=getColRenameDict(mergersheet,sheet)
print columnrenamedict
altereddf=df.rename(columns=columnrenamedict)
for key,value in columnrenamedict.iteritems():
if key =="NA":
altereddf[value]=0
print df,altereddf
altereddfs[sheet]=altereddf
finalsheet=[]
for sheet,matrix in altereddfs.iteritems():
if sheet!=mergersheetname:
finalsheet.append(matrix.fillna(0))
finalsheetm=pandas.concat(finalsheet)
finalsheetname=mergersheet.columns.values[0]
altereddfs[finalsheetname]=finalsheetm
return altereddfs
|
apache-2.0
|
katyhuff/moose
|
framework/contrib/nsiqcppstyle/rules/RULE_10_1_B_do_not_use_bufferoverflow_risky_function_for_windows.py
|
43
|
2942
|
"""
Do not use buffer overflow risky functions in window env.
if they're found, this rule reports a violation.
== Buffer Overflow Risky Function List in Window ==
- strcat()
- wcscat()
- lstrcat()
- strcat()
- StrCatBuff()
- _tcscat()
- _ftcscat()
- strncat()
- StrNCat()
- strcpy()
- wcscpy()
- lstrcpy()
- strcpy()
- _tcscpy()
- _ftcscpy()
- Strncpy()
- gets()
- _getws()
- _getts()
- sprintf()
- swprintf()
- wsprintf()
- wnsprintf()
- _stprintf()
- _snprintf()
- _snwprintf()
- _sntprintf()
- vsprintf()
- vswprintf()
- wvsprintf()
- wvnsprintf()
- _vstprintf()
- _vsnprintf()
- _vsnwprintf()
- _vsntprintf()
- Strlen()
"""
from nsiqcppstyle_rulemanager import *
import nsiqcppstyle_reporter
windows_bufferoverflow_functions = (
'strcat',
'wcscat',
'lstrcat',
'strcat',
'StrCatBuff',
'_tcscat',
'_ftcscat',
'strncat',
'StrNCat',
'strcpy',
'wcscpy',
'lstrcpy',
'strcpy',
'_tcscpy',
'_ftcscpy',
'Strncpy',
'gets',
'_getws',
'_getts',
'sprintf',
'swprintf',
'wsprintf',
'wnsprintf',
'_stprintf',
'_snprintf',
'_snwprintf',
'_sntprintf',
'vsprintf',
'vswprintf',
'wvsprintf',
'wvnsprintf',
'_vstprintf',
'_vsnprintf',
'_vsnwprintf',
'_vsntprintf',
'Strlen'
)
def RunRule(lexer, contextStack) :
t = lexer.GetCurToken()
if t.type == "ID" :
if t.value in windows_bufferoverflow_functions :
t2 = lexer.PeekNextTokenSkipWhiteSpaceAndComment()
if t2 != None and t2.type == "LPAREN" :
t3 = lexer.PeekPrevTokenSkipWhiteSpaceAndComment()
if t3 == None or t3.type != "PERIOD" :
nsiqcppstyle_reporter.Error(t, __name__,
"Do not use burfferoverflow risky function(%s)" % t.value)
ruleManager.AddFunctionScopeRule(RunRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddFunctionScopeRule(RunRule)
def test1(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = strcat()
}
""")
assert CheckErrorContent(__name__)
def test2(self):
self.Analyze("thisfile.c",
"""
void func1() {
#define strcat() k
}
""")
assert not CheckErrorContent(__name__)
def test3(self):
self.Analyze("thisfile.c",
"""
void strcat() {
}
""")
assert not CheckErrorContent(__name__)
def test4(self):
self.Analyze("thisfile.c",
"""
void strcat () {
}
""")
assert not CheckErrorContent(__name__)
def test5(self):
self.Analyze("thisfile.c",
"""
void func1()
{
k = help.strcat ()
}
""")
assert not CheckErrorContent(__name__)
|
lgpl-2.1
|
jollyroger/debian-buildbot
|
buildbot/test/unit/test_steps_package_rpm_rpmbuild.py
|
3
|
2875
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot import config
from buildbot.status.results import SUCCESS
from buildbot.steps.package.rpm import rpmbuild
from buildbot.test.fake.remotecommand import ExpectShell
from buildbot.test.util import steps
from twisted.trial import unittest
class RpmBuild(steps.BuildStepMixin, unittest.TestCase):
def setUp(self):
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_specfile(self):
self.assertRaises(config.ConfigErrors, lambda:
rpmbuild.RpmBuild())
def test_success(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", dist=".el6"))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define "_specdir '
'`pwd`" --define "_srcrpmdir `pwd`" --define "dist .el6" '
'-ba foo.spec',
usePTY='slave-config')
+ ExpectShell.log('stdio',
stdout='lalala')
+ 0)
self.expectOutcome(result=SUCCESS, status_text=['RPMBUILD'])
return self.runStep()
def test_autoRelease(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", dist=".el6",
autoRelease=True))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir `pwd`" '
'--define "_sourcedir `pwd`" --define "_specdir `pwd`" '
'--define "_srcrpmdir `pwd`" --define "dist .el6" '
'--define "_release 0" -ba foo.spec',
usePTY='slave-config')
+ ExpectShell.log('stdio',
stdout='Your code has been rated at 10/10')
+ 0)
self.expectOutcome(result=SUCCESS, status_text=['RPMBUILD'])
return self.runStep()
|
gpl-2.0
|
CSC-IT-Center-for-Science/pouta-blueprints
|
pebbles/views/authorize_instances.py
|
1
|
3153
|
from flask import abort, request, Response, Blueprint
import datetime
import logging
import re
from pebbles.models import InstanceToken
from pebbles.server import restful
authorize_instances = Blueprint('authorize_instances', __name__)
class AuthorizeInstancesView(restful.Resource):
def get(self):
token = ''
instance_id = ''
# The idea here is to check if the original-token and instance-id headers are already present, sent by the nginx proxy of the openshift app,
# if the headers are present that means the authentication had taken place previously and a cookie exists for the openshift app,
# in this case - obtain the info contained in the headers
if 'ORIGINAL-TOKEN' in request.headers and 'INSTANCE-ID' in request.headers:
token = request.headers['ORIGINAL-TOKEN']
instance_id = request.headers['INSTANCE-ID']
# otherwise, the x-original-uri consists of the query string info (which is sent by the openshift driver to the nginx of the openshift app)
# The query string has the token info and instance id
# NOTE: This is only used when the authentication is being done for the first time!
elif 'X-ORIGINAL-URI' in request.headers:
h_uri = request.headers['X-ORIGINAL-URI']
regex_query_capture = re.search('.*\\?(.*)=(.*)&(.*)=(.*)', h_uri) # parse the query string
if regex_query_capture and len(regex_query_capture.groups()) == 4:
if regex_query_capture.group(1) == 'token' and regex_query_capture.group(3) == 'instance_id':
token = regex_query_capture.group(2)
instance_id = regex_query_capture.group(4)
elif regex_query_capture.group(1) == 'instance_id' and regex_query_capture.group(3) == 'token':
instance_id = regex_query_capture.group(2)
token = regex_query_capture.group(4)
if not token and not instance_id:
logging.warn('No instance token or id found from the headers')
return abort(401)
instance_token_obj = InstanceToken.query.filter_by(token=token).first()
if not instance_token_obj:
logging.warn("instance token object %s not found" % token)
return abort(401)
curr_time = datetime.datetime.utcnow()
expires_on = instance_token_obj.expires_on
if curr_time > expires_on:
logging.warn("instance token %s has expired" % token)
return abort(403)
if instance_token_obj.instance_id != instance_id:
logging.warn("instance id %s from the token does not match the instance_id %s passed as a parameter" % (instance_token_obj.instance_id, instance_id))
return abort(403)
resp = Response("Authorized")
# send the headers back to nginx proxy running on the openshift based instance,
# which is going to store it as a cookie for the next time, the authorization takes place
resp.headers["TOKEN"] = instance_token_obj.token
resp.headers["INSTANCE-ID"] = instance_id
return resp
|
mit
|
superchilli/webapp
|
venv/lib/python2.7/site-packages/pygments/token.py
|
365
|
5662
|
# -*- coding: utf-8 -*-
"""
pygments.token
~~~~~~~~~~~~~~
Basic token types and the standard tokens.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
class _TokenType(tuple):
parent = None
def split(self):
buf = []
node = self
while node is not None:
buf.append(node)
node = node.parent
buf.reverse()
return buf
def __init__(self, *args):
# no need to call super.__init__
self.subtypes = set()
def __contains__(self, val):
return self is val or (
type(val) is self.__class__ and
val[:len(self)] == self
)
def __getattr__(self, val):
if not val or not val[0].isupper():
return tuple.__getattribute__(self, val)
new = _TokenType(self + (val,))
setattr(self, val, new)
self.subtypes.add(new)
new.parent = self
return new
def __repr__(self):
return 'Token' + (self and '.' or '') + '.'.join(self)
Token = _TokenType()
# Special token types
Text = Token.Text
Whitespace = Text.Whitespace
Error = Token.Error
# Text that doesn't belong to this lexer (e.g. HTML in PHP)
Other = Token.Other
# Common token types for source code
Keyword = Token.Keyword
Name = Token.Name
Literal = Token.Literal
String = Literal.String
Number = Literal.Number
Punctuation = Token.Punctuation
Operator = Token.Operator
Comment = Token.Comment
# Generic types for non-source code
Generic = Token.Generic
# String and some others are not direct childs of Token.
# alias them:
Token.Token = Token
Token.String = String
Token.Number = Number
def is_token_subtype(ttype, other):
"""
Return True if ``ttype`` is a subtype of ``other``.
exists for backwards compatibility. use ``ttype in other`` now.
"""
return ttype in other
def string_to_tokentype(s):
"""
Convert a string into a token type::
>>> string_to_token('String.Double')
Token.Literal.String.Double
>>> string_to_token('Token.Literal.Number')
Token.Literal.Number
>>> string_to_token('')
Token
Tokens that are already tokens are returned unchanged:
>>> string_to_token(String)
Token.Literal.String
"""
if isinstance(s, _TokenType):
return s
if not s:
return Token
node = Token
for item in s.split('.'):
node = getattr(node, item)
return node
# Map standard token types to short names, used in CSS class naming.
# If you add a new item, please be sure to run this file to perform
# a consistency check for duplicate values.
STANDARD_TYPES = {
Token: '',
Text: '',
Whitespace: 'w',
Error: 'err',
Other: 'x',
Keyword: 'k',
Keyword.Constant: 'kc',
Keyword.Declaration: 'kd',
Keyword.Namespace: 'kn',
Keyword.Pseudo: 'kp',
Keyword.Reserved: 'kr',
Keyword.Type: 'kt',
Name: 'n',
Name.Attribute: 'na',
Name.Builtin: 'nb',
Name.Builtin.Pseudo: 'bp',
Name.Class: 'nc',
Name.Constant: 'no',
Name.Decorator: 'nd',
Name.Entity: 'ni',
Name.Exception: 'ne',
Name.Function: 'nf',
Name.Property: 'py',
Name.Label: 'nl',
Name.Namespace: 'nn',
Name.Other: 'nx',
Name.Tag: 'nt',
Name.Variable: 'nv',
Name.Variable.Class: 'vc',
Name.Variable.Global: 'vg',
Name.Variable.Instance: 'vi',
Literal: 'l',
Literal.Date: 'ld',
String: 's',
String.Backtick: 'sb',
String.Char: 'sc',
String.Doc: 'sd',
String.Double: 's2',
String.Escape: 'se',
String.Heredoc: 'sh',
String.Interpol: 'si',
String.Other: 'sx',
String.Regex: 'sr',
String.Single: 's1',
String.Symbol: 'ss',
Number: 'm',
Number.Float: 'mf',
Number.Hex: 'mh',
Number.Integer: 'mi',
Number.Integer.Long: 'il',
Number.Oct: 'mo',
Operator: 'o',
Operator.Word: 'ow',
Punctuation: 'p',
Comment: 'c',
Comment.Multiline: 'cm',
Comment.Preproc: 'cp',
Comment.Single: 'c1',
Comment.Special: 'cs',
Generic: 'g',
Generic.Deleted: 'gd',
Generic.Emph: 'ge',
Generic.Error: 'gr',
Generic.Heading: 'gh',
Generic.Inserted: 'gi',
Generic.Output: 'go',
Generic.Prompt: 'gp',
Generic.Strong: 'gs',
Generic.Subheading: 'gu',
Generic.Traceback: 'gt',
}
|
mit
|
danmit/django-calaccess-raw-data
|
example/toolbox/management/commands/createfielddocissues.py
|
29
|
5833
|
import os
import time
import calculate
from github import Github
from django.conf import settings
from calaccess_raw import get_model_list
from calaccess_raw.management.commands import CalAccessCommand
from django.contrib.humanize.templatetags.humanize import intcomma
class Command(CalAccessCommand):
help = 'Create GitHub issues for model fields without documentation'
def set_options(self, *args, **kwargs):
"""
Hook up with the GitHub API and prepare to create issues.
"""
self.gh = Github(os.getenv('GITHUB_TOKEN'))
self.org = self.gh.get_organization("california-civic-data-coalition")
self.repo = self.org.get_repo("django-calaccess-raw-data")
self.labels = [
self.repo.get_label("small"),
self.repo.get_label("documentation"),
self.repo.get_label("enhancement"),
]
self.milestone = self.repo.get_milestone(3)
def handle(self, *args, **kwargs):
"""
Make it happen.
"""
self.set_options()
self.header(
"Creating GitHub issues for model fields without documentation"
)
# Loop through all the models and find any fields without docs
field_count = 0
missing_list = []
for m in get_model_list():
field_list = m().get_field_list()
field_count += len(field_list)
for f in field_list:
if not self.has_docs(f):
missing_list.append((m, f))
# If everything is done, declare victory
if not missing_list:
self.success("All %s fields documented!" % field_count)
return False
# If not, loop through the missing and create issues
missing_count = len(missing_list)
self.log(
"- %s/%s (%d%%) of fields lack documentation" % (
intcomma(missing_count),
intcomma(field_count),
calculate.percentage(missing_count, field_count)
)
)
for model, field in missing_list[611:]:
# For now we are excluding the 'other' model module to
# avoid overkill
if model().klass_group != 'other':
self.create_issue(model, field)
def has_docs(self, field):
"""
Test if a Django field has some kind of documentation already.
Returns True or False
"""
if field.name == 'id':
return True
if field.help_text:
return True
if field.__dict__['_verbose_name']:
return True
return False
def create_issue(self, model, field):
"""
Create a GitHub issue for the provided model and field.
"""
title = TITLE_TEMPLATE % (field.name, model().klass_name)
body = BODY_TEMPLATE % (
field.name,
model().klass_name,
model().klass_group,
model().klass_group,
)
self.log("-- Creating issue for %s.%s" % (
model().klass_name,
field.name
)
)
self.repo.create_issue(
title,
body=body,
labels=self.labels,
milestone=self.milestone
)
time.sleep(2.5)
TITLE_TEMPLATE = """
Add documentation for the ``%s`` field on the ``%s`` database model
""".replace("\n", "")
BODY_TEMPLATE = """
## Your mission
Add documentation for the ``%s`` field on the ``%s`` database model.
## Here's how
**Step 1**: Claim this ticket by leaving a comment below. Tell everyone you're ON IT!
**Step 2**: Open up the file that contains this model. It should be in <a href="https://github.com/california-civic-data-coalition/django-calaccess-raw-data/blob/master/calaccess_raw/models/%s.py">calaccess_raw.models.%s.py</a>.
**Step 3**: Hit the little pencil button in the upper-right corner of the code box to begin editing the file.

**Step 4**: Find this model and field in the file. (Clicking into the box and searching with CTRL-F can help you here.) Once you find it, we expect the field to lack the ``help_text`` field typically used in Django to explain what a field contains.
```python
effect_dt = fields.DateField(
null=True,
db_column="EFFECT_DT"
)
```
**Step 5**: In a separate tab, open up the <a href="Quilmes">official state documentation</a> and find the page that defines all the fields in this model.

**Step 6**: Find the row in that table's definition table that spells out what this field contains. If it lacks documentation. Note that in the ticket and close it now.

**Step 7**: Return to the GitHub tab.
**Step 8**: Add the state's label explaining what's in the field, to our field definition by inserting it a ``help_text`` argument. That should look something like this:
```python
effect_dt = fields.DateField(
null=True,
db_column="EFFECT_DT",
# Add a help_text argument like the one here, but put your string in instead.
help_text="The other values in record were effective as of this date"
)
```
**Step 9**: Scroll down below the code box and describe the change you've made in the commit message. Press the button below.

**Step 10**: Review your changes and create a pull request submitting them to the core team for inclusion.

That's it! Mission accomplished!
"""
|
mit
|
andrius-preimantas/odoo
|
addons/hr_contract/base_action_rule.py
|
389
|
2646
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013 OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.base_action_rule.base_action_rule import get_datetime
from openerp.osv import fields, osv
class base_action_rule(osv.Model):
""" Add resource and calendar for time-based conditions """
_name = 'base.action.rule'
_inherit = ['base.action.rule']
_columns = {
'trg_date_resource_field_id': fields.many2one(
'ir.model.fields', 'Use employee work schedule',
help='Use the user\'s working schedule.',
),
}
def _check_delay(self, cr, uid, action, record, record_dt, context=None):
""" Override the check of delay to try to use a user-related calendar.
If no calendar is found, fallback on the default behavior. """
if action.trg_date_calendar_id and action.trg_date_range_type == 'day' and action.trg_date_resource_field_id:
user = record[action.trg_date_resource_field_id.name]
if user.employee_ids and user.employee_ids[0].contract_id \
and user.employee_ids[0].contract_id.working_hours:
calendar = user.employee_ids[0].contract_id.working_hours
start_dt = get_datetime(record_dt)
resource_id = user.employee_ids[0].resource_id.id
action_dt = self.pool['resource.calendar'].schedule_days_get_date(
cr, uid, calendar.id, action.trg_date_range,
day_date=start_dt, compute_leaves=True, resource_id=resource_id,
context=context
)
return action_dt
return super(base_action_rule, self)._check_delay(cr, uid, action, record, record_dt, context=context)
|
agpl-3.0
|
sudheesh001/oh-mainline
|
vendor/packages/python-openid/openid/__init__.py
|
139
|
1623
|
"""
This package is an implementation of the OpenID specification in
Python. It contains code for both server and consumer
implementations. For information on implementing an OpenID consumer,
see the C{L{openid.consumer.consumer}} module. For information on
implementing an OpenID server, see the C{L{openid.server.server}}
module.
@contact: U{http://openid.net/developers/dev-mailing-lists/
<http://openid.net/developers/dev-mailing-lists/}
@copyright: (C) 2005-2008 JanRain, Inc.
@license: Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
U{http://www.apache.org/licenses/LICENSE-2.0}
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions
and limitations under the License.
"""
__version__ = '[library version:2.2.1]'[17:-1]
__all__ = [
'association',
'consumer',
'cryptutil',
'dh',
'extension',
'extensions',
'fetchers',
'kvform',
'message',
'oidutil',
'server',
'sreg',
'store',
'urinorm',
'yadis',
]
# Parse the version info
try:
version_info = map(int, __version__.split('.'))
except ValueError:
version_info = (None, None, None)
else:
if len(version_info) != 3:
version_info = (None, None, None)
else:
version_info = tuple(version_info)
|
agpl-3.0
|
groovecoder/kuma
|
vendor/packages/translate/convert/test_po2html.py
|
26
|
5722
|
#!/usr/bin/env python
from pytest import mark
from translate.convert import po2html, test_convert
from translate.misc import wStringIO
class TestPO2Html:
def converthtml(self, posource, htmltemplate, includefuzzy=False):
"""helper to exercise the command line function"""
inputfile = wStringIO.StringIO(posource)
print(inputfile.getvalue())
outputfile = wStringIO.StringIO()
templatefile = wStringIO.StringIO(htmltemplate)
assert po2html.converthtml(inputfile, outputfile, templatefile, includefuzzy)
print(outputfile.getvalue())
return outputfile.getvalue()
def test_simple(self):
"""simple po to html test"""
htmlsource = '<p>A sentence.</p>'
posource = '''#: html:3\nmsgid "A sentence."\nmsgstr "'n Sin."\n'''
htmlexpected = '''<p>'n Sin.</p>'''
assert htmlexpected in self.converthtml(posource, htmlsource)
def test_linebreaks(self):
"""Test that a po file can be merged into a template with linebreaks in it."""
htmlsource = '''<html>
<head>
</head>
<body>
<div>
A paragraph is a section in a piece of writing, usually highlighting a
particular point or topic. It always begins on a new line and usually
with indentation, and it consists of at least one sentence.
</div>
</body>
</html>
'''
posource = '''#: None:1
msgid ""
"A paragraph is a section in a piece of writing, usually highlighting a "
"particular point or topic. It always begins on a new line and usually with "
"indentation, and it consists of at least one sentence."
msgstr ""
"'n Paragraaf is 'n afdeling in 'n geskrewe stuk wat gewoonlik 'n spesifieke "
"punt uitlig. Dit begin altyd op 'n nuwe lyn (gewoonlik met indentasie) en "
"dit bestaan uit ten minste een sin."
'''
htmlexpected = '''<body>
<div>
'n Paragraaf is 'n afdeling in 'n geskrewe stuk wat gewoonlik
'n spesifieke punt uitlig. Dit begin altyd op 'n nuwe lyn
(gewoonlik met indentasie) en dit bestaan uit ten minste een
sin.
</div>
</body>'''
assert htmlexpected.replace("\n", " ") in self.converthtml(posource, htmlsource).replace("\n", " ")
@mark.xfail(reason="Not Implemented")
def test_entities(self):
"""Tests that entities are handled correctly"""
htmlsource = '<p>5 less than 6</p>'
posource = '#:html:3\nmsgid "5 less than 6"\nmsgstr "5 < 6"\n'
htmlexpected = '<p>5 < 6</p>'
assert htmlexpected in self.converthtml(posource, htmlsource)
htmlsource = '<p>Fish & chips</p>'
posource = '#: html:3\nmsgid "Fish & chips"\nmsgstr "Vis & skyfies"\n'
htmlexpected = '<p>Vis & skyfies</p>'
assert htmlexpected in self.converthtml(posource, htmlsource)
@mark.xfail(reason="Not Implemented")
def test_escapes(self):
"""Tests that PO escapes are correctly handled"""
htmlsource = '<div>Row 1<br />Row 2</div>'
posource = '#: html:3\nmsgid "Row 1\\n"\n"Row 2"\nmsgstr "Ry 1\\n"\n"Ry 2"\n'
htmlexpected = '<div>Ry 1<br />Ry 2</div>'
assert htmlexpected in self.converthtml(posource, htmlsource)
htmlsource = '<p>"leverage"</p>'
posource = '#: html3\nmsgid "\\"leverage\\""\nmsgstr "\\"ek is dom\\""\n'
htmlexpected = '<p>"ek is dom"</p>'
assert htmlexpected in self.converthtml(posource, htmlsource)
def test_states_translated(self):
"""Test that we use target when translated"""
htmlsource = '<div>aaa</div>'
posource = 'msgid "aaa"\nmsgstr "bbb"\n'
htmltarget = '<div>bbb</div>'
assert htmltarget in self.converthtml(posource, htmlsource)
assert htmlsource not in self.converthtml(posource, htmlsource)
def test_states_untranslated(self):
"""Test that we use source when a string is untranslated"""
htmlsource = '<div>aaa</div>'
posource = 'msgid "aaa"\nmsgstr ""\n'
htmltarget = htmlsource
assert htmltarget in self.converthtml(posource, htmlsource)
def test_states_fuzzy(self):
"""Test that we use source when a string is fuzzy
This fixes :issue:`3145`
"""
htmlsource = '<div>aaa</div>'
posource = '#: html:3\n#, fuzzy\nmsgid "aaa"\nmsgstr "bbb"\n'
htmltarget = '<div>bbb</div>'
# Don't use fuzzies
assert htmltarget not in self.converthtml(posource, htmlsource, includefuzzy=False)
assert htmlsource in self.converthtml(posource, htmlsource, includefuzzy=False)
# Use fuzzies
assert htmltarget in self.converthtml(posource, htmlsource, includefuzzy=True)
assert htmlsource not in self.converthtml(posource, htmlsource, includefuzzy=True)
def test_untranslated_attributes(self):
"""Verify that untranslated attributes are output as source, not dropped."""
htmlsource = '<meta name="keywords" content="life, the universe, everything" />'
posource = '#: test.html+:-1\nmsgid "life, the universe, everything"\nmsgstr ""'
expected = '<meta name="keywords" content="life, the universe, everything" />'
assert expected in self.converthtml(posource, htmlsource)
class TestPO2HtmlCommand(test_convert.TestConvertCommand, TestPO2Html):
"""Tests running actual po2oo commands on files"""
convertmodule = po2html
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-t TEMPLATE, --template=TEMPLATE")
options = self.help_check(options, "--threshold=PERCENT")
options = self.help_check(options, "--fuzzy")
options = self.help_check(options, "--nofuzzy", last=True)
|
mpl-2.0
|
weiqiangdragonite/blog_tmp
|
python/baidu/myip.py
|
1
|
1085
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# http://ip.taobao.com/instructions.php
import socket
#
common_headers = \
"Host: ip.taobao.com\r\n" + \
"Connection: Keep-Alive\r\n" + \
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n" + \
"User-Agent: Mozilla/5.0 (X11; Linux) AppleWebKit/538.1 (KHTML, like Gecko) Chrome/18.0.1025.133 Safari/538.1 Midori/0.5\r\n" + \
"Accept-Language: en-us;q=0.750\r\n"
# 通过 GET
get_headers = \
"GET /service/getIpInfo.php?ip=myip HTTP/1.1\r\n" + \
common_headers + \
"\r\n"
# 通过 POST
post_headers = \
"POST /service/getIpInfo2.php HTTP/1.1\r\n" + \
common_headers + \
"Content-Length: 7\r\n" + \
"\r\n" + \
"ip=myip";
if __name__ == "__main__":
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("ip.taobao.com", 80))
s.send(get_headers)
buffer = []
while True:
d = s.recv(1024)
if d:
buffer.append(d)
else:
break
data = ''.join(buffer)
s.close()
print data
|
gpl-2.0
|
NKSG/ns3
|
bindings/python/apidefs/gcc-LP64/ns3_module_core.py
|
4
|
220109
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
def register_types(module):
root_module = module.get_root()
## log.h: ns3::LogLevel [enumeration]
module.add_enum('LogLevel', ['LOG_NONE', 'LOG_ERROR', 'LOG_LEVEL_ERROR', 'LOG_WARN', 'LOG_LEVEL_WARN', 'LOG_DEBUG', 'LOG_LEVEL_DEBUG', 'LOG_INFO', 'LOG_LEVEL_INFO', 'LOG_FUNCTION', 'LOG_LEVEL_FUNCTION', 'LOG_LOGIC', 'LOG_LEVEL_LOGIC', 'LOG_ALL', 'LOG_LEVEL_ALL', 'LOG_PREFIX_FUNC', 'LOG_PREFIX_TIME', 'LOG_PREFIX_NODE'])
## attribute-list.h: ns3::AttributeList [class]
module.add_class('AttributeList')
## callback.h: ns3::CallbackBase [class]
module.add_class('CallbackBase')
## command-line.h: ns3::CommandLine [class]
module.add_class('CommandLine', allow_subclassing=True)
## system-mutex.h: ns3::CriticalSection [class]
module.add_class('CriticalSection')
## global-value.h: ns3::GlobalValue [class]
module.add_class('GlobalValue')
## int-to-type.h: ns3::IntToType<0> [struct]
module.add_class('IntToType', template_parameters=['0'])
## int-to-type.h: ns3::IntToType<0>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 0 >'])
## int-to-type.h: ns3::IntToType<1> [struct]
module.add_class('IntToType', template_parameters=['1'])
## int-to-type.h: ns3::IntToType<1>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 1 >'])
## int-to-type.h: ns3::IntToType<2> [struct]
module.add_class('IntToType', template_parameters=['2'])
## int-to-type.h: ns3::IntToType<2>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 2 >'])
## int-to-type.h: ns3::IntToType<3> [struct]
module.add_class('IntToType', template_parameters=['3'])
## int-to-type.h: ns3::IntToType<3>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 3 >'])
## int-to-type.h: ns3::IntToType<4> [struct]
module.add_class('IntToType', template_parameters=['4'])
## int-to-type.h: ns3::IntToType<4>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 4 >'])
## int-to-type.h: ns3::IntToType<5> [struct]
module.add_class('IntToType', template_parameters=['5'])
## int-to-type.h: ns3::IntToType<5>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 5 >'])
## int-to-type.h: ns3::IntToType<6> [struct]
module.add_class('IntToType', template_parameters=['6'])
## int-to-type.h: ns3::IntToType<6>::v_e [enumeration]
module.add_enum('v_e', ['value'], outer_class=root_module['ns3::IntToType< 6 >'])
## names.h: ns3::Names [class]
module.add_class('Names')
## object-base.h: ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True)
## object.h: ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter')
## object-factory.h: ns3::ObjectFactory [class]
module.add_class('ObjectFactory')
## random-variable.h: ns3::RandomVariable [class]
module.add_class('RandomVariable')
## rng-stream.h: ns3::RngStream [class]
module.add_class('RngStream')
## random-variable.h: ns3::SeedManager [class]
module.add_class('SeedManager')
## random-variable.h: ns3::SequentialVariable [class]
module.add_class('SequentialVariable', parent=root_module['ns3::RandomVariable'])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## system-condition.h: ns3::SystemCondition [class]
module.add_class('SystemCondition')
## system-mutex.h: ns3::SystemMutex [class]
module.add_class('SystemMutex')
## system-wall-clock-ms.h: ns3::SystemWallClockMs [class]
module.add_class('SystemWallClockMs')
## test.h: ns3::TestCase [class]
module.add_class('TestCase', allow_subclassing=True)
## test.h: ns3::TestRunner [class]
module.add_class('TestRunner')
## test.h: ns3::TestSuite [class]
module.add_class('TestSuite', allow_subclassing=True)
## test.h: ns3::TestSuite::TestType [enumeration]
module.add_enum('TestType', ['BVT', 'UNIT', 'SYSTEM', 'EXAMPLE', 'PERFORMANCE'], outer_class=root_module['ns3::TestSuite'])
## traced-value.h: ns3::TracedValue<double> [class]
module.add_class('TracedValue', template_parameters=['double'])
## random-variable.h: ns3::TriangularVariable [class]
module.add_class('TriangularVariable', parent=root_module['ns3::RandomVariable'])
## type-id.h: ns3::TypeId [class]
module.add_class('TypeId')
## type-id.h: ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'])
## type-id.h: ns3::TypeId::AttributeInfo [struct]
module.add_class('AttributeInfo', outer_class=root_module['ns3::TypeId'])
## random-variable.h: ns3::UniformVariable [class]
module.add_class('UniformVariable', parent=root_module['ns3::RandomVariable'])
## attribute-list.h: ns3::UnsafeAttributeList [class]
module.add_class('UnsafeAttributeList')
## vector.h: ns3::Vector2D [class]
module.add_class('Vector2D')
## vector.h: ns3::Vector3D [class]
module.add_class('Vector3D')
## random-variable.h: ns3::WeibullVariable [class]
module.add_class('WeibullVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::ZetaVariable [class]
module.add_class('ZetaVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::ZipfVariable [class]
module.add_class('ZipfVariable', parent=root_module['ns3::RandomVariable'])
## empty.h: ns3::empty [class]
module.add_class('empty')
## random-variable.h: ns3::ConstantVariable [class]
module.add_class('ConstantVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::DeterministicVariable [class]
module.add_class('DeterministicVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::EmpiricalVariable [class]
module.add_class('EmpiricalVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::ErlangVariable [class]
module.add_class('ErlangVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::ExponentialVariable [class]
module.add_class('ExponentialVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::GammaVariable [class]
module.add_class('GammaVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::IntEmpiricalVariable [class]
module.add_class('IntEmpiricalVariable', parent=root_module['ns3::EmpiricalVariable'])
## random-variable.h: ns3::LogNormalVariable [class]
module.add_class('LogNormalVariable', parent=root_module['ns3::RandomVariable'])
## random-variable.h: ns3::NormalVariable [class]
module.add_class('NormalVariable', parent=root_module['ns3::RandomVariable'])
## object.h: ns3::Object [class]
module.add_class('Object', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h: ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', outer_class=root_module['ns3::Object'])
## random-variable.h: ns3::ParetoVariable [class]
module.add_class('ParetoVariable', parent=root_module['ns3::RandomVariable'])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FlowClassifier', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FlowClassifier>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::FlowProbe', 'ns3::empty', 'ns3::DefaultDeleter<ns3::FlowProbe>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::IdealControlMessage, ns3::empty, ns3::DefaultDeleter<ns3::IdealControlMessage> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::IdealControlMessage', 'ns3::empty', 'ns3::DefaultDeleter<ns3::IdealControlMessage>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::RefCountBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::RefCountBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::SystemThread', 'ns3::empty', 'ns3::DefaultDeleter<ns3::SystemThread>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h: ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## system-thread.h: ns3::SystemThread [class]
module.add_class('SystemThread', parent=root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
## trace-source-accessor.h: ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## attribute.h: ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h: ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h: ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## boolean.h: ns3::BooleanChecker [class]
module.add_class('BooleanChecker', parent=root_module['ns3::AttributeChecker'])
## boolean.h: ns3::BooleanValue [class]
module.add_class('BooleanValue', parent=root_module['ns3::AttributeValue'])
## callback.h: ns3::CallbackChecker [class]
module.add_class('CallbackChecker', parent=root_module['ns3::AttributeChecker'])
## callback.h: ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h: ns3::CallbackValue [class]
module.add_class('CallbackValue', parent=root_module['ns3::AttributeValue'])
## double.h: ns3::DoubleValue [class]
module.add_class('DoubleValue', parent=root_module['ns3::AttributeValue'])
## attribute.h: ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', parent=root_module['ns3::AttributeValue'])
## enum.h: ns3::EnumChecker [class]
module.add_class('EnumChecker', parent=root_module['ns3::AttributeChecker'])
## enum.h: ns3::EnumValue [class]
module.add_class('EnumValue', parent=root_module['ns3::AttributeValue'])
## integer.h: ns3::IntegerValue [class]
module.add_class('IntegerValue', parent=root_module['ns3::AttributeValue'])
## object-factory.h: ns3::ObjectFactoryChecker [class]
module.add_class('ObjectFactoryChecker', parent=root_module['ns3::AttributeChecker'])
## object-factory.h: ns3::ObjectFactoryValue [class]
module.add_class('ObjectFactoryValue', parent=root_module['ns3::AttributeValue'])
## object-vector.h: ns3::ObjectVectorAccessor [class]
module.add_class('ObjectVectorAccessor', parent=root_module['ns3::AttributeAccessor'])
## object-vector.h: ns3::ObjectVectorChecker [class]
module.add_class('ObjectVectorChecker', parent=root_module['ns3::AttributeChecker'])
## object-vector.h: ns3::ObjectVectorValue [class]
module.add_class('ObjectVectorValue', parent=root_module['ns3::AttributeValue'])
## pointer.h: ns3::PointerChecker [class]
module.add_class('PointerChecker', parent=root_module['ns3::AttributeChecker'])
## pointer.h: ns3::PointerValue [class]
module.add_class('PointerValue', parent=root_module['ns3::AttributeValue'])
## random-variable.h: ns3::RandomVariableChecker [class]
module.add_class('RandomVariableChecker', parent=root_module['ns3::AttributeChecker'])
## random-variable.h: ns3::RandomVariableValue [class]
module.add_class('RandomVariableValue', parent=root_module['ns3::AttributeValue'])
## ref-count-base.h: ns3::RefCountBase [class]
module.add_class('RefCountBase', parent=root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
## string.h: ns3::StringChecker [class]
module.add_class('StringChecker', parent=root_module['ns3::AttributeChecker'])
## string.h: ns3::StringValue [class]
module.add_class('StringValue', parent=root_module['ns3::AttributeValue'])
## type-id.h: ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', parent=root_module['ns3::AttributeChecker'])
## type-id.h: ns3::TypeIdValue [class]
module.add_class('TypeIdValue', parent=root_module['ns3::AttributeValue'])
## uinteger.h: ns3::UintegerValue [class]
module.add_class('UintegerValue', parent=root_module['ns3::AttributeValue'])
## vector.h: ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h: ns3::Vector2DValue [class]
module.add_class('Vector2DValue', parent=root_module['ns3::AttributeValue'])
## vector.h: ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', parent=root_module['ns3::AttributeChecker'])
## vector.h: ns3::Vector3DValue [class]
module.add_class('Vector3DValue', parent=root_module['ns3::AttributeValue'])
module.add_container('std::set< ns3::TypeId >', 'ns3::TypeId', container_type='set')
module.add_container('std::list< ns3::Ptr< ns3::SpectrumPhy > >', 'ns3::Ptr< ns3::SpectrumPhy >', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::Packet > >', 'ns3::Ptr< ns3::Packet >', container_type='list')
module.add_container('std::vector< ns3::Ptr< ns3::RadioBearerInstance > >', 'ns3::Ptr< ns3::RadioBearerInstance >', container_type='vector')
module.add_container('std::list< ns3::Ptr< ns3::UanPhy > >', 'ns3::Ptr< ns3::UanPhy >', container_type='list')
module.add_container('std::vector< ns3::Ptr< ns3::UeRecord > >', 'ns3::Ptr< ns3::UeRecord >', container_type='vector')
module.add_container('std::list< ns3::Ptr< ns3::UlJob > >', 'ns3::Ptr< ns3::UlJob >', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::Packet const > >', 'ns3::Ptr< ns3::Packet const >', container_type='list')
module.add_container('std::vector< ns3::Ptr< ns3::WimaxConnection > >', 'ns3::Ptr< ns3::WimaxConnection >', container_type='vector')
module.add_container('std::vector< ns3::Ptr< ns3::FlowProbe > >', 'ns3::Ptr< ns3::FlowProbe >', container_type='vector')
module.add_container('std::list< ns3::Ptr< ns3::Socket > >', 'ns3::Ptr< ns3::Socket >', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::RadvdPrefix > >', 'ns3::Ptr< ns3::RadvdPrefix >', container_type='list')
module.add_container('std::list< ns3::Ptr< ns3::UanTransducer > >', 'ns3::Ptr< ns3::UanTransducer >', container_type='list')
module.add_container('std::vector< ns3::Ptr< ns3::NetDevice > >', 'ns3::Ptr< ns3::NetDevice >', container_type='vector')
module.add_container('std::vector< ns3::Ptr< ns3::SpectrumPhy > >', 'ns3::Ptr< ns3::SpectrumPhy >', container_type='vector')
typehandlers.add_type_alias('ns3::Vector3DChecker', 'ns3::VectorChecker')
typehandlers.add_type_alias('ns3::Vector3DChecker*', 'ns3::VectorChecker*')
typehandlers.add_type_alias('ns3::Vector3DChecker&', 'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
typehandlers.add_type_alias('ns3::Vector3D', 'ns3::Vector')
typehandlers.add_type_alias('ns3::Vector3D*', 'ns3::Vector*')
typehandlers.add_type_alias('ns3::Vector3D&', 'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias('ns3::Vector3DValue', 'ns3::VectorValue')
typehandlers.add_type_alias('ns3::Vector3DValue*', 'ns3::VectorValue*')
typehandlers.add_type_alias('ns3::Vector3DValue&', 'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
## Register a nested module for the namespace Config
nested_module = module.add_cpp_namespace('Config')
register_types_ns3_Config(nested_module)
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace addressUtils
nested_module = module.add_cpp_namespace('addressUtils')
register_types_ns3_addressUtils(nested_module)
## Register a nested module for the namespace aodv
nested_module = module.add_cpp_namespace('aodv')
register_types_ns3_aodv(nested_module)
## Register a nested module for the namespace dot11s
nested_module = module.add_cpp_namespace('dot11s')
register_types_ns3_dot11s(nested_module)
## Register a nested module for the namespace dsdv
nested_module = module.add_cpp_namespace('dsdv')
register_types_ns3_dsdv(nested_module)
## Register a nested module for the namespace flame
nested_module = module.add_cpp_namespace('flame')
register_types_ns3_flame(nested_module)
## Register a nested module for the namespace internal
nested_module = module.add_cpp_namespace('internal')
register_types_ns3_internal(nested_module)
## Register a nested module for the namespace olsr
nested_module = module.add_cpp_namespace('olsr')
register_types_ns3_olsr(nested_module)
def register_types_ns3_Config(module):
root_module = module.get_root()
## config.h: ns3::Config::MatchContainer [class]
module.add_class('MatchContainer')
module.add_container('std::vector< ns3::Ptr< ns3::Object > >', 'ns3::Ptr< ns3::Object >', container_type='vector')
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_addressUtils(module):
root_module = module.get_root()
def register_types_ns3_aodv(module):
root_module = module.get_root()
def register_types_ns3_dot11s(module):
root_module = module.get_root()
module.add_container('std::vector< ns3::Ptr< ns3::dot11s::IeBeaconTimingUnit > >', 'ns3::Ptr< ns3::dot11s::IeBeaconTimingUnit >', container_type='vector')
module.add_container('std::vector< ns3::Ptr< ns3::dot11s::PeerLink > >', 'ns3::Ptr< ns3::dot11s::PeerLink >', container_type='vector')
def register_types_ns3_dsdv(module):
root_module = module.get_root()
def register_types_ns3_flame(module):
root_module = module.get_root()
def register_types_ns3_internal(module):
root_module = module.get_root()
def register_types_ns3_olsr(module):
root_module = module.get_root()
def register_methods(root_module):
register_Ns3AttributeList_methods(root_module, root_module['ns3::AttributeList'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3CommandLine_methods(root_module, root_module['ns3::CommandLine'])
register_Ns3CriticalSection_methods(root_module, root_module['ns3::CriticalSection'])
register_Ns3GlobalValue_methods(root_module, root_module['ns3::GlobalValue'])
register_Ns3IntToType__0_methods(root_module, root_module['ns3::IntToType< 0 >'])
register_Ns3IntToType__1_methods(root_module, root_module['ns3::IntToType< 1 >'])
register_Ns3IntToType__2_methods(root_module, root_module['ns3::IntToType< 2 >'])
register_Ns3IntToType__3_methods(root_module, root_module['ns3::IntToType< 3 >'])
register_Ns3IntToType__4_methods(root_module, root_module['ns3::IntToType< 4 >'])
register_Ns3IntToType__5_methods(root_module, root_module['ns3::IntToType< 5 >'])
register_Ns3IntToType__6_methods(root_module, root_module['ns3::IntToType< 6 >'])
register_Ns3Names_methods(root_module, root_module['ns3::Names'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3ObjectFactory_methods(root_module, root_module['ns3::ObjectFactory'])
register_Ns3RandomVariable_methods(root_module, root_module['ns3::RandomVariable'])
register_Ns3RngStream_methods(root_module, root_module['ns3::RngStream'])
register_Ns3SeedManager_methods(root_module, root_module['ns3::SeedManager'])
register_Ns3SequentialVariable_methods(root_module, root_module['ns3::SequentialVariable'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3SystemCondition_methods(root_module, root_module['ns3::SystemCondition'])
register_Ns3SystemMutex_methods(root_module, root_module['ns3::SystemMutex'])
register_Ns3SystemWallClockMs_methods(root_module, root_module['ns3::SystemWallClockMs'])
register_Ns3TestCase_methods(root_module, root_module['ns3::TestCase'])
register_Ns3TestRunner_methods(root_module, root_module['ns3::TestRunner'])
register_Ns3TestSuite_methods(root_module, root_module['ns3::TestSuite'])
register_Ns3TracedValue__Double_methods(root_module, root_module['ns3::TracedValue< double >'])
register_Ns3TracedValue__Ns3Time_methods(root_module, root_module['ns3::TracedValue< ns3::Time >'])
register_Ns3TriangularVariable_methods(root_module, root_module['ns3::TriangularVariable'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInfo_methods(root_module, root_module['ns3::TypeId::AttributeInfo'])
register_Ns3UniformVariable_methods(root_module, root_module['ns3::UniformVariable'])
register_Ns3UnsafeAttributeList_methods(root_module, root_module['ns3::UnsafeAttributeList'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3WeibullVariable_methods(root_module, root_module['ns3::WeibullVariable'])
register_Ns3ZetaVariable_methods(root_module, root_module['ns3::ZetaVariable'])
register_Ns3ZipfVariable_methods(root_module, root_module['ns3::ZipfVariable'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3ConstantVariable_methods(root_module, root_module['ns3::ConstantVariable'])
register_Ns3DeterministicVariable_methods(root_module, root_module['ns3::DeterministicVariable'])
register_Ns3EmpiricalVariable_methods(root_module, root_module['ns3::EmpiricalVariable'])
register_Ns3ErlangVariable_methods(root_module, root_module['ns3::ErlangVariable'])
register_Ns3ExponentialVariable_methods(root_module, root_module['ns3::ExponentialVariable'])
register_Ns3GammaVariable_methods(root_module, root_module['ns3::GammaVariable'])
register_Ns3IntEmpiricalVariable_methods(root_module, root_module['ns3::IntEmpiricalVariable'])
register_Ns3LogNormalVariable_methods(root_module, root_module['ns3::LogNormalVariable'])
register_Ns3NormalVariable_methods(root_module, root_module['ns3::NormalVariable'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3ParetoVariable_methods(root_module, root_module['ns3::ParetoVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >'])
register_Ns3SimpleRefCount__Ns3FlowClassifier_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowClassifier__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >'])
register_Ns3SimpleRefCount__Ns3FlowProbe_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowProbe__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >'])
register_Ns3SimpleRefCount__Ns3IdealControlMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3IdealControlMessage__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::IdealControlMessage, ns3::empty, ns3::DefaultDeleter<ns3::IdealControlMessage> >'])
register_Ns3SimpleRefCount__Ns3InterferenceHelperEvent_Ns3Empty_Ns3DefaultDeleter__lt__ns3InterferenceHelperEvent__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::InterferenceHelper::Event, ns3::empty, ns3::DefaultDeleter<ns3::InterferenceHelper::Event> >'])
register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >'])
register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >'])
register_Ns3SimpleRefCount__Ns3Ipv6MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv6MulticastRoute__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv6MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6MulticastRoute> >'])
register_Ns3SimpleRefCount__Ns3Ipv6Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv6Route__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Ipv6Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6Route> >'])
register_Ns3SimpleRefCount__Ns3MeshWifiInterfaceMacPlugin_Ns3Empty_Ns3DefaultDeleter__lt__ns3MeshWifiInterfaceMacPlugin__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::MeshWifiInterfaceMacPlugin, ns3::empty, ns3::DefaultDeleter<ns3::MeshWifiInterfaceMacPlugin> >'])
register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >'])
register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >'])
register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >'])
register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >'])
register_Ns3SimpleRefCount__Ns3PbbMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbMessage__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >'])
register_Ns3SimpleRefCount__Ns3PbbPacket_Ns3Header_Ns3DefaultDeleter__lt__ns3PbbPacket__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >'])
register_Ns3SimpleRefCount__Ns3PbbTlv_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbTlv__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >'])
register_Ns3SimpleRefCount__Ns3RadvdInterface_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdInterface__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> >'])
register_Ns3SimpleRefCount__Ns3RadvdPrefix_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdPrefix__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> >'])
register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >'])
register_Ns3SimpleRefCount__Ns3SpectrumConverter_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumConverter__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumConverter> >'])
register_Ns3SimpleRefCount__Ns3SpectrumModel_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumModel__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumModel> >'])
register_Ns3SimpleRefCount__Ns3SpectrumValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumValue> >'])
register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3SimpleRefCount__Ns3WifiInformationElement_Ns3Empty_Ns3DefaultDeleter__lt__ns3WifiInformationElement__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::WifiInformationElement, ns3::empty, ns3::DefaultDeleter<ns3::WifiInformationElement> >'])
register_Ns3SimpleRefCount__Ns3Dot11sIeBeaconTimingUnit_Ns3Empty_Ns3DefaultDeleter__lt__ns3Dot11sIeBeaconTimingUnit__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> >'])
register_Ns3SystemThread_methods(root_module, root_module['ns3::SystemThread'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3BooleanChecker_methods(root_module, root_module['ns3::BooleanChecker'])
register_Ns3BooleanValue_methods(root_module, root_module['ns3::BooleanValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3DoubleValue_methods(root_module, root_module['ns3::DoubleValue'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3EnumChecker_methods(root_module, root_module['ns3::EnumChecker'])
register_Ns3EnumValue_methods(root_module, root_module['ns3::EnumValue'])
register_Ns3IntegerValue_methods(root_module, root_module['ns3::IntegerValue'])
register_Ns3ObjectFactoryChecker_methods(root_module, root_module['ns3::ObjectFactoryChecker'])
register_Ns3ObjectFactoryValue_methods(root_module, root_module['ns3::ObjectFactoryValue'])
register_Ns3ObjectVectorAccessor_methods(root_module, root_module['ns3::ObjectVectorAccessor'])
register_Ns3ObjectVectorChecker_methods(root_module, root_module['ns3::ObjectVectorChecker'])
register_Ns3ObjectVectorValue_methods(root_module, root_module['ns3::ObjectVectorValue'])
register_Ns3PointerChecker_methods(root_module, root_module['ns3::PointerChecker'])
register_Ns3PointerValue_methods(root_module, root_module['ns3::PointerValue'])
register_Ns3RandomVariableChecker_methods(root_module, root_module['ns3::RandomVariableChecker'])
register_Ns3RandomVariableValue_methods(root_module, root_module['ns3::RandomVariableValue'])
register_Ns3RefCountBase_methods(root_module, root_module['ns3::RefCountBase'])
register_Ns3StringChecker_methods(root_module, root_module['ns3::StringChecker'])
register_Ns3StringValue_methods(root_module, root_module['ns3::StringValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3UintegerValue_methods(root_module, root_module['ns3::UintegerValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3ConfigMatchContainer_methods(root_module, root_module['ns3::Config::MatchContainer'])
return
def register_Ns3AttributeList_methods(root_module, cls):
## attribute-list.h: ns3::AttributeList::AttributeList() [constructor]
cls.add_constructor([])
## attribute-list.h: ns3::AttributeList::AttributeList(ns3::AttributeList const & o) [copy constructor]
cls.add_constructor([param('ns3::AttributeList const &', 'o')])
## attribute-list.h: bool ns3::AttributeList::DeserializeFromString(std::string value) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value')])
## attribute-list.h: static ns3::AttributeList * ns3::AttributeList::GetGlobal() [member function]
cls.add_method('GetGlobal',
'ns3::AttributeList *',
[],
is_static=True)
## attribute-list.h: void ns3::AttributeList::Reset() [member function]
cls.add_method('Reset',
'void',
[])
## attribute-list.h: std::string ns3::AttributeList::SerializeToString() const [member function]
cls.add_method('SerializeToString',
'std::string',
[],
is_const=True)
## attribute-list.h: void ns3::AttributeList::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## attribute-list.h: bool ns3::AttributeList::SetFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## attribute-list.h: void ns3::AttributeList::SetWithTid(ns3::TypeId tid, std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetWithTid',
'void',
[param('ns3::TypeId', 'tid'), param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h: ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h: ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h: ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h: ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h: static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CommandLine_methods(root_module, cls):
## command-line.h: ns3::CommandLine::CommandLine() [constructor]
cls.add_constructor([])
## command-line.h: ns3::CommandLine::CommandLine(ns3::CommandLine const & cmd) [copy constructor]
cls.add_constructor([param('ns3::CommandLine const &', 'cmd')])
## command-line.h: void ns3::CommandLine::AddValue(std::string const & name, std::string const & help, ns3::Callback<bool, std::string, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [member function]
cls.add_method('AddValue',
'void',
[param('std::string const &', 'name'), param('std::string const &', 'help'), param('ns3::Callback< bool, std::string, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
return
def register_Ns3CriticalSection_methods(root_module, cls):
## system-mutex.h: ns3::CriticalSection::CriticalSection(ns3::CriticalSection const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CriticalSection const &', 'arg0')])
## system-mutex.h: ns3::CriticalSection::CriticalSection(ns3::SystemMutex & mutex) [constructor]
cls.add_constructor([param('ns3::SystemMutex &', 'mutex')])
return
def register_Ns3GlobalValue_methods(root_module, cls):
## global-value.h: ns3::GlobalValue::GlobalValue(ns3::GlobalValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GlobalValue const &', 'arg0')])
## global-value.h: ns3::GlobalValue::GlobalValue(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeChecker const> checker) [constructor]
cls.add_constructor([param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## global-value.h: static __gnu_cxx::__normal_iterator<ns3::GlobalValue* const*,std::vector<ns3::GlobalValue*, std::allocator<ns3::GlobalValue*> > > ns3::GlobalValue::Begin() [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::GlobalValue * const *, std::vector< ns3::GlobalValue * > >',
[],
is_static=True)
## global-value.h: static void ns3::GlobalValue::Bind(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Bind',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h: static bool ns3::GlobalValue::BindFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('BindFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')],
is_static=True)
## global-value.h: static __gnu_cxx::__normal_iterator<ns3::GlobalValue* const*,std::vector<ns3::GlobalValue*, std::allocator<ns3::GlobalValue*> > > ns3::GlobalValue::End() [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::GlobalValue * const *, std::vector< ns3::GlobalValue * > >',
[],
is_static=True)
## global-value.h: ns3::Ptr<ns3::AttributeChecker const> ns3::GlobalValue::GetChecker() const [member function]
cls.add_method('GetChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[],
is_const=True)
## global-value.h: std::string ns3::GlobalValue::GetHelp() const [member function]
cls.add_method('GetHelp',
'std::string',
[],
is_const=True)
## global-value.h: std::string ns3::GlobalValue::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## global-value.h: void ns3::GlobalValue::GetValue(ns3::AttributeValue & value) const [member function]
cls.add_method('GetValue',
'void',
[param('ns3::AttributeValue &', 'value')],
is_const=True)
## global-value.h: static void ns3::GlobalValue::GetValueByName(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByName',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h: static bool ns3::GlobalValue::GetValueByNameFailSafe(std::string name, ns3::AttributeValue & value) [member function]
cls.add_method('GetValueByNameFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_static=True)
## global-value.h: bool ns3::GlobalValue::SetValue(ns3::AttributeValue const & value) [member function]
cls.add_method('SetValue',
'bool',
[param('ns3::AttributeValue const &', 'value')])
return
def register_Ns3IntToType__0_methods(root_module, cls):
## int-to-type.h: ns3::IntToType<0>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h: ns3::IntToType<0>::IntToType(ns3::IntToType<0> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 0 > const &', 'arg0')])
return
def register_Ns3IntToType__1_methods(root_module, cls):
## int-to-type.h: ns3::IntToType<1>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h: ns3::IntToType<1>::IntToType(ns3::IntToType<1> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 1 > const &', 'arg0')])
return
def register_Ns3IntToType__2_methods(root_module, cls):
## int-to-type.h: ns3::IntToType<2>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h: ns3::IntToType<2>::IntToType(ns3::IntToType<2> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 2 > const &', 'arg0')])
return
def register_Ns3IntToType__3_methods(root_module, cls):
## int-to-type.h: ns3::IntToType<3>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h: ns3::IntToType<3>::IntToType(ns3::IntToType<3> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 3 > const &', 'arg0')])
return
def register_Ns3IntToType__4_methods(root_module, cls):
## int-to-type.h: ns3::IntToType<4>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h: ns3::IntToType<4>::IntToType(ns3::IntToType<4> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 4 > const &', 'arg0')])
return
def register_Ns3IntToType__5_methods(root_module, cls):
## int-to-type.h: ns3::IntToType<5>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h: ns3::IntToType<5>::IntToType(ns3::IntToType<5> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 5 > const &', 'arg0')])
return
def register_Ns3IntToType__6_methods(root_module, cls):
## int-to-type.h: ns3::IntToType<6>::IntToType() [constructor]
cls.add_constructor([])
## int-to-type.h: ns3::IntToType<6>::IntToType(ns3::IntToType<6> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntToType< 6 > const &', 'arg0')])
return
def register_Ns3Names_methods(root_module, cls):
## names.h: ns3::Names::Names() [constructor]
cls.add_constructor([])
## names.h: ns3::Names::Names(ns3::Names const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Names const &', 'arg0')])
## names.h: static void ns3::Names::Add(std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h: static void ns3::Names::Add(std::string path, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'path'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h: static void ns3::Names::Add(ns3::Ptr<ns3::Object> context, std::string name, ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('Add',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'name'), param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h: static void ns3::Names::Clear() [member function]
cls.add_method('Clear',
'void',
[],
is_static=True)
## names.h: static std::string ns3::Names::FindName(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindName',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h: static std::string ns3::Names::FindPath(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('FindPath',
'std::string',
[param('ns3::Ptr< ns3::Object >', 'object')],
is_static=True)
## names.h: static void ns3::Names::Rename(std::string oldpath, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'oldpath'), param('std::string', 'newname')],
is_static=True)
## names.h: static void ns3::Names::Rename(std::string path, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('std::string', 'path'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
## names.h: static void ns3::Names::Rename(ns3::Ptr<ns3::Object> context, std::string oldname, std::string newname) [member function]
cls.add_method('Rename',
'void',
[param('ns3::Ptr< ns3::Object >', 'context'), param('std::string', 'oldname'), param('std::string', 'newname')],
is_static=True)
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h: ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h: ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h: void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h: bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h: ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h: static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h: void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h: bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h: bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h: bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h: bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h: bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h: void ns3::ObjectBase::ConstructSelf(ns3::AttributeList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeList const &', 'attributes')],
visibility='protected')
## object-base.h: void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h: ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h: ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h: static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3ObjectFactory_methods(root_module, cls):
cls.add_output_stream_operator()
## object-factory.h: ns3::ObjectFactory::ObjectFactory(ns3::ObjectFactory const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'arg0')])
## object-factory.h: ns3::ObjectFactory::ObjectFactory() [constructor]
cls.add_constructor([])
## object-factory.h: ns3::Ptr<ns3::Object> ns3::ObjectFactory::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## object-factory.h: ns3::TypeId ns3::ObjectFactory::GetTypeId() const [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_const=True)
## object-factory.h: void ns3::ObjectFactory::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-factory.h: void ns3::ObjectFactory::Set(ns3::AttributeList const & list) [member function]
cls.add_method('Set',
'void',
[param('ns3::AttributeList const &', 'list')])
## object-factory.h: void ns3::ObjectFactory::SetTypeId(ns3::TypeId tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('ns3::TypeId', 'tid')])
## object-factory.h: void ns3::ObjectFactory::SetTypeId(char const * tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('char const *', 'tid')])
## object-factory.h: void ns3::ObjectFactory::SetTypeId(std::string tid) [member function]
cls.add_method('SetTypeId',
'void',
[param('std::string', 'tid')])
return
def register_Ns3RandomVariable_methods(root_module, cls):
cls.add_output_stream_operator()
## random-variable.h: ns3::RandomVariable::RandomVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::RandomVariable::RandomVariable(ns3::RandomVariable const & o) [copy constructor]
cls.add_constructor([param('ns3::RandomVariable const &', 'o')])
## random-variable.h: uint32_t ns3::RandomVariable::GetInteger() const [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_const=True)
## random-variable.h: double ns3::RandomVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
return
def register_Ns3RngStream_methods(root_module, cls):
## rng-stream.h: ns3::RngStream::RngStream() [constructor]
cls.add_constructor([])
## rng-stream.h: ns3::RngStream::RngStream(ns3::RngStream const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RngStream const &', 'arg0')])
## rng-stream.h: void ns3::RngStream::AdvanceState(int32_t e, int32_t c) [member function]
cls.add_method('AdvanceState',
'void',
[param('int32_t', 'e'), param('int32_t', 'c')])
## rng-stream.h: static bool ns3::RngStream::CheckSeed(uint32_t const * seed) [member function]
cls.add_method('CheckSeed',
'bool',
[param('uint32_t const *', 'seed')],
is_static=True)
## rng-stream.h: static bool ns3::RngStream::CheckSeed(uint32_t seed) [member function]
cls.add_method('CheckSeed',
'bool',
[param('uint32_t', 'seed')],
is_static=True)
## rng-stream.h: static uint32_t ns3::RngStream::GetPackageRun() [member function]
cls.add_method('GetPackageRun',
'uint32_t',
[],
is_static=True)
## rng-stream.h: static void ns3::RngStream::GetPackageSeed(uint32_t * seed) [member function]
cls.add_method('GetPackageSeed',
'void',
[param('uint32_t *', 'seed')],
is_static=True)
## rng-stream.h: void ns3::RngStream::GetState(uint32_t * seed) const [member function]
cls.add_method('GetState',
'void',
[param('uint32_t *', 'seed')],
is_const=True)
## rng-stream.h: void ns3::RngStream::IncreasedPrecis(bool incp) [member function]
cls.add_method('IncreasedPrecis',
'void',
[param('bool', 'incp')])
## rng-stream.h: void ns3::RngStream::InitializeStream() [member function]
cls.add_method('InitializeStream',
'void',
[])
## rng-stream.h: int32_t ns3::RngStream::RandInt(int32_t i, int32_t j) [member function]
cls.add_method('RandInt',
'int32_t',
[param('int32_t', 'i'), param('int32_t', 'j')])
## rng-stream.h: double ns3::RngStream::RandU01() [member function]
cls.add_method('RandU01',
'double',
[])
## rng-stream.h: void ns3::RngStream::ResetNextSubstream() [member function]
cls.add_method('ResetNextSubstream',
'void',
[])
## rng-stream.h: void ns3::RngStream::ResetNthSubstream(uint32_t N) [member function]
cls.add_method('ResetNthSubstream',
'void',
[param('uint32_t', 'N')])
## rng-stream.h: void ns3::RngStream::ResetStartStream() [member function]
cls.add_method('ResetStartStream',
'void',
[])
## rng-stream.h: void ns3::RngStream::ResetStartSubstream() [member function]
cls.add_method('ResetStartSubstream',
'void',
[])
## rng-stream.h: void ns3::RngStream::SetAntithetic(bool a) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'a')])
## rng-stream.h: static void ns3::RngStream::SetPackageRun(uint32_t run) [member function]
cls.add_method('SetPackageRun',
'void',
[param('uint32_t', 'run')],
is_static=True)
## rng-stream.h: static bool ns3::RngStream::SetPackageSeed(uint32_t seed) [member function]
cls.add_method('SetPackageSeed',
'bool',
[param('uint32_t', 'seed')],
is_static=True)
## rng-stream.h: static bool ns3::RngStream::SetPackageSeed(uint32_t const * seed) [member function]
cls.add_method('SetPackageSeed',
'bool',
[param('uint32_t const *', 'seed')],
is_static=True)
## rng-stream.h: bool ns3::RngStream::SetSeeds(uint32_t const * seed) [member function]
cls.add_method('SetSeeds',
'bool',
[param('uint32_t const *', 'seed')])
return
def register_Ns3SeedManager_methods(root_module, cls):
## random-variable.h: ns3::SeedManager::SeedManager() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::SeedManager::SeedManager(ns3::SeedManager const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SeedManager const &', 'arg0')])
## random-variable.h: static bool ns3::SeedManager::CheckSeed(uint32_t seed) [member function]
cls.add_method('CheckSeed',
'bool',
[param('uint32_t', 'seed')],
is_static=True)
## random-variable.h: static uint32_t ns3::SeedManager::GetRun() [member function]
cls.add_method('GetRun',
'uint32_t',
[],
is_static=True)
## random-variable.h: static uint32_t ns3::SeedManager::GetSeed() [member function]
cls.add_method('GetSeed',
'uint32_t',
[],
is_static=True)
## random-variable.h: static void ns3::SeedManager::SetRun(uint32_t run) [member function]
cls.add_method('SetRun',
'void',
[param('uint32_t', 'run')],
is_static=True)
## random-variable.h: static void ns3::SeedManager::SetSeed(uint32_t seed) [member function]
cls.add_method('SetSeed',
'void',
[param('uint32_t', 'seed')],
is_static=True)
return
def register_Ns3SequentialVariable_methods(root_module, cls):
## random-variable.h: ns3::SequentialVariable::SequentialVariable(ns3::SequentialVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SequentialVariable const &', 'arg0')])
## random-variable.h: ns3::SequentialVariable::SequentialVariable(double f, double l, double i=1, uint32_t c=1) [constructor]
cls.add_constructor([param('double', 'f'), param('double', 'l'), param('double', 'i', default_value='1'), param('uint32_t', 'c', default_value='1')])
## random-variable.h: ns3::SequentialVariable::SequentialVariable(double f, double l, ns3::RandomVariable const & i, uint32_t c=1) [constructor]
cls.add_constructor([param('double', 'f'), param('double', 'l'), param('ns3::RandomVariable const &', 'i'), param('uint32_t', 'c', default_value='1')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SystemCondition_methods(root_module, cls):
## system-condition.h: ns3::SystemCondition::SystemCondition(ns3::SystemCondition const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemCondition const &', 'arg0')])
## system-condition.h: ns3::SystemCondition::SystemCondition() [constructor]
cls.add_constructor([])
## system-condition.h: void ns3::SystemCondition::Broadcast() [member function]
cls.add_method('Broadcast',
'void',
[])
## system-condition.h: bool ns3::SystemCondition::GetCondition() [member function]
cls.add_method('GetCondition',
'bool',
[])
## system-condition.h: void ns3::SystemCondition::SetCondition(bool condition) [member function]
cls.add_method('SetCondition',
'void',
[param('bool', 'condition')])
## system-condition.h: void ns3::SystemCondition::Signal() [member function]
cls.add_method('Signal',
'void',
[])
## system-condition.h: bool ns3::SystemCondition::TimedWait(uint64_t ns) [member function]
cls.add_method('TimedWait',
'bool',
[param('uint64_t', 'ns')])
## system-condition.h: void ns3::SystemCondition::Wait() [member function]
cls.add_method('Wait',
'void',
[])
return
def register_Ns3SystemMutex_methods(root_module, cls):
## system-mutex.h: ns3::SystemMutex::SystemMutex(ns3::SystemMutex const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemMutex const &', 'arg0')])
## system-mutex.h: ns3::SystemMutex::SystemMutex() [constructor]
cls.add_constructor([])
## system-mutex.h: void ns3::SystemMutex::Lock() [member function]
cls.add_method('Lock',
'void',
[])
## system-mutex.h: void ns3::SystemMutex::Unlock() [member function]
cls.add_method('Unlock',
'void',
[])
return
def register_Ns3SystemWallClockMs_methods(root_module, cls):
## system-wall-clock-ms.h: ns3::SystemWallClockMs::SystemWallClockMs(ns3::SystemWallClockMs const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemWallClockMs const &', 'arg0')])
## system-wall-clock-ms.h: ns3::SystemWallClockMs::SystemWallClockMs() [constructor]
cls.add_constructor([])
## system-wall-clock-ms.h: int64_t ns3::SystemWallClockMs::End() [member function]
cls.add_method('End',
'int64_t',
[])
## system-wall-clock-ms.h: int64_t ns3::SystemWallClockMs::GetElapsedReal() const [member function]
cls.add_method('GetElapsedReal',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h: int64_t ns3::SystemWallClockMs::GetElapsedSystem() const [member function]
cls.add_method('GetElapsedSystem',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h: int64_t ns3::SystemWallClockMs::GetElapsedUser() const [member function]
cls.add_method('GetElapsedUser',
'int64_t',
[],
is_const=True)
## system-wall-clock-ms.h: void ns3::SystemWallClockMs::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3TestCase_methods(root_module, cls):
## test.h: ns3::TestCase::TestCase(std::string name) [constructor]
cls.add_constructor([param('std::string', 'name')])
## test.h: bool ns3::TestCase::Run() [member function]
cls.add_method('Run',
'bool',
[])
## test.h: void ns3::TestCase::SetVerbose(bool verbose) [member function]
cls.add_method('SetVerbose',
'void',
[param('bool', 'verbose')])
## test.h: void ns3::TestCase::SetContinueOnFailure(bool continueOnFailure) [member function]
cls.add_method('SetContinueOnFailure',
'void',
[param('bool', 'continueOnFailure')])
## test.h: void ns3::TestCase::SetName(std::string name) [member function]
cls.add_method('SetName',
'void',
[param('std::string', 'name')])
## test.h: std::string ns3::TestCase::GetName() [member function]
cls.add_method('GetName',
'std::string',
[])
## test.h: void ns3::TestCase::SetBaseDir(std::string dir) [member function]
cls.add_method('SetBaseDir',
'void',
[param('std::string', 'dir')])
## test.h: std::string ns3::TestCase::GetBaseDir() [member function]
cls.add_method('GetBaseDir',
'std::string',
[])
## test.h: void ns3::TestCase::SetTempDir(std::string dir) [member function]
cls.add_method('SetTempDir',
'void',
[param('std::string', 'dir')])
## test.h: std::string ns3::TestCase::GetTempDir() [member function]
cls.add_method('GetTempDir',
'std::string',
[])
## test.h: std::string ns3::TestCase::GetSourceDir(std::string file) [member function]
cls.add_method('GetSourceDir',
'std::string',
[param('std::string', 'file')])
## test.h: void ns3::TestCase::SetStream(std::ofstream * ofs) [member function]
cls.add_method('SetStream',
'void',
[param('std::ofstream *', 'ofs')])
## test.h: std::ofstream * ns3::TestCase::GetStream() [member function]
cls.add_method('GetStream',
'std::ofstream *',
[])
## test.h: void ns3::TestCase::UpdateErrorStatus(bool error) [member function]
cls.add_method('UpdateErrorStatus',
'void',
[param('bool', 'error')])
## test.h: void ns3::TestCase::SetErrorStatus(bool error) [member function]
cls.add_method('SetErrorStatus',
'void',
[param('bool', 'error')])
## test.h: bool ns3::TestCase::GetErrorStatus() [member function]
cls.add_method('GetErrorStatus',
'bool',
[])
## test.h: bool ns3::TestCase::ContinueOnFailure() [member function]
cls.add_method('ContinueOnFailure',
'bool',
[])
## test.h: void ns3::TestCase::ReportStart() [member function]
cls.add_method('ReportStart',
'void',
[])
## test.h: void ns3::TestCase::ReportCaseSuccess() [member function]
cls.add_method('ReportCaseSuccess',
'void',
[])
## test.h: void ns3::TestCase::ReportCaseFailure() [member function]
cls.add_method('ReportCaseFailure',
'void',
[])
## test.h: void ns3::TestCase::ReportTestFailure(std::string cond, std::string actual, std::string limit, std::string message, std::string file, int32_t line) [member function]
cls.add_method('ReportTestFailure',
'void',
[param('std::string', 'cond'), param('std::string', 'actual'), param('std::string', 'limit'), param('std::string', 'message'), param('std::string', 'file'), param('int32_t', 'line')])
## test.h: void ns3::TestCase::ReportEnd() [member function]
cls.add_method('ReportEnd',
'void',
[])
## test.h: void ns3::TestCase::DoReportStart() [member function]
cls.add_method('DoReportStart',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestCase::DoReportCaseSuccess() [member function]
cls.add_method('DoReportCaseSuccess',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestCase::DoReportCaseFailure() [member function]
cls.add_method('DoReportCaseFailure',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestCase::DoReportTestFailure(std::string cond, std::string actual, std::string limit, std::string message, std::string file, int32_t line) [member function]
cls.add_method('DoReportTestFailure',
'void',
[param('std::string', 'cond'), param('std::string', 'actual'), param('std::string', 'limit'), param('std::string', 'message'), param('std::string', 'file'), param('int32_t', 'line')],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestCase::DoReportEnd() [member function]
cls.add_method('DoReportEnd',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestCase::DoSetup() [member function]
cls.add_method('DoSetup',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: bool ns3::TestCase::DoRun() [member function]
cls.add_method('DoRun',
'bool',
[],
is_pure_virtual=True, visibility='protected', is_virtual=True)
## test.h: void ns3::TestCase::DoTeardown() [member function]
cls.add_method('DoTeardown',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3TestRunner_methods(root_module, cls):
## test.h: ns3::TestRunner::TestRunner() [constructor]
cls.add_constructor([])
## test.h: ns3::TestRunner::TestRunner(ns3::TestRunner const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TestRunner const &', 'arg0')])
## test.h: static uint32_t ns3::TestRunner::AddTestSuite(ns3::TestSuite * testSuite) [member function]
cls.add_method('AddTestSuite',
'uint32_t',
[param('ns3::TestSuite *', 'testSuite')],
is_static=True)
## test.h: static uint32_t ns3::TestRunner::GetNTestSuites() [member function]
cls.add_method('GetNTestSuites',
'uint32_t',
[],
is_static=True)
## test.h: static ns3::TestSuite * ns3::TestRunner::GetTestSuite(uint32_t n) [member function]
cls.add_method('GetTestSuite',
'ns3::TestSuite *',
[param('uint32_t', 'n')],
is_static=True)
return
def register_Ns3TestSuite_methods(root_module, cls):
## test.h: ns3::TestSuite::TestSuite(std::string name, ns3::TestSuite::TestType type=::ns3::TestSuite::UNIT) [constructor]
cls.add_constructor([param('std::string', 'name'), param('ns3::TestSuite::TestType', 'type', default_value='::ns3::TestSuite::UNIT')])
## test.h: bool ns3::TestSuite::Run() [member function]
cls.add_method('Run',
'bool',
[])
## test.h: uint32_t ns3::TestSuite::AddTestCase(ns3::TestCase * testCase) [member function]
cls.add_method('AddTestCase',
'uint32_t',
[param('ns3::TestCase *', 'testCase')])
## test.h: uint32_t ns3::TestSuite::GetNTestCases() [member function]
cls.add_method('GetNTestCases',
'uint32_t',
[])
## test.h: ns3::TestCase * ns3::TestSuite::GetTestCase(uint32_t i) [member function]
cls.add_method('GetTestCase',
'ns3::TestCase *',
[param('uint32_t', 'i')])
## test.h: ns3::TestSuite::TestType ns3::TestSuite::GetTestType() [member function]
cls.add_method('GetTestType',
'ns3::TestSuite::TestType',
[])
## test.h: void ns3::TestSuite::SetVerbose(bool verbose) [member function]
cls.add_method('SetVerbose',
'void',
[param('bool', 'verbose')])
## test.h: void ns3::TestSuite::SetContinueOnFailure(bool continueOnFailure) [member function]
cls.add_method('SetContinueOnFailure',
'void',
[param('bool', 'continueOnFailure')])
## test.h: void ns3::TestSuite::SetName(std::string name) [member function]
cls.add_method('SetName',
'void',
[param('std::string', 'name')])
## test.h: std::string ns3::TestSuite::GetName() [member function]
cls.add_method('GetName',
'std::string',
[])
## test.h: void ns3::TestSuite::SetBaseDir(std::string basedir) [member function]
cls.add_method('SetBaseDir',
'void',
[param('std::string', 'basedir')])
## test.h: std::string ns3::TestSuite::GetBaseDir() [member function]
cls.add_method('GetBaseDir',
'std::string',
[])
## test.h: void ns3::TestSuite::SetTempDir(std::string dir) [member function]
cls.add_method('SetTempDir',
'void',
[param('std::string', 'dir')])
## test.h: std::string ns3::TestSuite::GetTempDir() [member function]
cls.add_method('GetTempDir',
'std::string',
[])
## test.h: void ns3::TestSuite::SetStream(std::ofstream * ofs) [member function]
cls.add_method('SetStream',
'void',
[param('std::ofstream *', 'ofs')])
## test.h: void ns3::TestSuite::UpdateErrorStatus(bool error) [member function]
cls.add_method('UpdateErrorStatus',
'void',
[param('bool', 'error')])
## test.h: void ns3::TestSuite::SetErrorStatus(bool error) [member function]
cls.add_method('SetErrorStatus',
'void',
[param('bool', 'error')])
## test.h: bool ns3::TestSuite::GetErrorStatus() [member function]
cls.add_method('GetErrorStatus',
'bool',
[])
## test.h: bool ns3::TestSuite::ContinueOnFailure() [member function]
cls.add_method('ContinueOnFailure',
'bool',
[])
## test.h: void ns3::TestSuite::ReportStart() [member function]
cls.add_method('ReportStart',
'void',
[])
## test.h: void ns3::TestSuite::ReportSuccess() [member function]
cls.add_method('ReportSuccess',
'void',
[])
## test.h: void ns3::TestSuite::ReportFailure() [member function]
cls.add_method('ReportFailure',
'void',
[])
## test.h: void ns3::TestSuite::ReportEnd() [member function]
cls.add_method('ReportEnd',
'void',
[])
## test.h: void ns3::TestSuite::DoReportStart() [member function]
cls.add_method('DoReportStart',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestSuite::DoReportSuccess() [member function]
cls.add_method('DoReportSuccess',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestSuite::DoReportFailure() [member function]
cls.add_method('DoReportFailure',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestSuite::DoReportEnd() [member function]
cls.add_method('DoReportEnd',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestSuite::DoSetup() [member function]
cls.add_method('DoSetup',
'void',
[],
visibility='protected', is_virtual=True)
## test.h: bool ns3::TestSuite::DoRun() [member function]
cls.add_method('DoRun',
'bool',
[],
visibility='protected', is_virtual=True)
## test.h: void ns3::TestSuite::DoTeardown() [member function]
cls.add_method('DoTeardown',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3TracedValue__Double_methods(root_module, cls):
## traced-value.h: ns3::TracedValue<double>::TracedValue() [constructor]
cls.add_constructor([])
## traced-value.h: ns3::TracedValue<double>::TracedValue(ns3::TracedValue<double> const & o) [copy constructor]
cls.add_constructor([param('ns3::TracedValue< double > const &', 'o')])
## traced-value.h: ns3::TracedValue<double>::TracedValue(double const & v) [constructor]
cls.add_constructor([param('double const &', 'v')])
## traced-value.h: void ns3::TracedValue<double>::Connect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
cls.add_method('Connect',
'void',
[param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
## traced-value.h: void ns3::TracedValue<double>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('ns3::CallbackBase const &', 'cb')])
## traced-value.h: void ns3::TracedValue<double>::Disconnect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
cls.add_method('Disconnect',
'void',
[param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
## traced-value.h: void ns3::TracedValue<double>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('ns3::CallbackBase const &', 'cb')])
## traced-value.h: double ns3::TracedValue<double>::Get() const [member function]
cls.add_method('Get',
'double',
[],
is_const=True)
## traced-value.h: void ns3::TracedValue<double>::Set(double const & v) [member function]
cls.add_method('Set',
'void',
[param('double const &', 'v')])
return
def register_Ns3TracedValue__Ns3Time_methods(root_module, cls):
## traced-value.h: ns3::TracedValue<ns3::Time>::TracedValue() [constructor]
cls.add_constructor([])
## traced-value.h: ns3::TracedValue<ns3::Time>::TracedValue(ns3::TracedValue<ns3::Time> const & o) [copy constructor]
cls.add_constructor([param('ns3::TracedValue< ns3::Time > const &', 'o')])
## traced-value.h: ns3::TracedValue<ns3::Time>::TracedValue(ns3::Time const & v) [constructor]
cls.add_constructor([param('ns3::Time const &', 'v')])
## traced-value.h: void ns3::TracedValue<ns3::Time>::Connect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
cls.add_method('Connect',
'void',
[param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
## traced-value.h: void ns3::TracedValue<ns3::Time>::ConnectWithoutContext(ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('ns3::CallbackBase const &', 'cb')])
## traced-value.h: void ns3::TracedValue<ns3::Time>::Disconnect(ns3::CallbackBase const & cb, std::basic_string<char,std::char_traits<char>,std::allocator<char> > path) [member function]
cls.add_method('Disconnect',
'void',
[param('ns3::CallbackBase const &', 'cb'), param('std::string', 'path')])
## traced-value.h: void ns3::TracedValue<ns3::Time>::DisconnectWithoutContext(ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('ns3::CallbackBase const &', 'cb')])
## traced-value.h: ns3::Time ns3::TracedValue<ns3::Time>::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## traced-value.h: void ns3::TracedValue<ns3::Time>::Set(ns3::Time const & v) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'v')])
return
def register_Ns3TriangularVariable_methods(root_module, cls):
## random-variable.h: ns3::TriangularVariable::TriangularVariable(ns3::TriangularVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TriangularVariable const &', 'arg0')])
## random-variable.h: ns3::TriangularVariable::TriangularVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::TriangularVariable::TriangularVariable(double s, double l, double mean) [constructor]
cls.add_constructor([param('double', 's'), param('double', 'l'), param('double', 'mean')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('<')
## type-id.h: ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h: ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h: ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h: ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h: ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h: ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h: ns3::Ptr<ns3::AttributeAccessor const> ns3::TypeId::GetAttributeAccessor(uint32_t i) const [member function]
cls.add_method('GetAttributeAccessor',
'ns3::Ptr< ns3::AttributeAccessor const >',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: ns3::Ptr<ns3::AttributeChecker const> ns3::TypeId::GetAttributeChecker(uint32_t i) const [member function]
cls.add_method('GetAttributeChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: uint32_t ns3::TypeId::GetAttributeFlags(uint32_t i) const [member function]
cls.add_method('GetAttributeFlags',
'uint32_t',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: std::string ns3::TypeId::GetAttributeHelp(uint32_t i) const [member function]
cls.add_method('GetAttributeHelp',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: ns3::Ptr<ns3::AttributeValue const> ns3::TypeId::GetAttributeInitialValue(uint32_t i) const [member function]
cls.add_method('GetAttributeInitialValue',
'ns3::Ptr< ns3::AttributeValue const >',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h: std::string ns3::TypeId::GetAttributeName(uint32_t i) const [member function]
cls.add_method('GetAttributeName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h: std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h: std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h: ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h: static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h: static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h: ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::GetTraceSourceAccessor(uint32_t i) const [member function]
cls.add_method('GetTraceSourceAccessor',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: std::string ns3::TypeId::GetTraceSourceHelp(uint32_t i) const [member function]
cls.add_method('GetTraceSourceHelp',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h: std::string ns3::TypeId::GetTraceSourceName(uint32_t i) const [member function]
cls.add_method('GetTraceSourceName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h: uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h: bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h: bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h: ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h: bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h: static bool ns3::TypeId::LookupAttributeByFullName(std::string fullName, ns3::TypeId::AttributeInfo * info) [member function]
cls.add_method('LookupAttributeByFullName',
'bool',
[param('std::string', 'fullName'), param('ns3::TypeId::AttributeInfo *', 'info')],
is_static=True)
## type-id.h: bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInfo * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInfo *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h: static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h: ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h: bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h: ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h: ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h: void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInfo_methods(root_module, cls):
## type-id.h: ns3::TypeId::AttributeInfo::AttributeInfo() [constructor]
cls.add_constructor([])
## type-id.h: ns3::TypeId::AttributeInfo::AttributeInfo(ns3::TypeId::AttributeInfo const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInfo const &', 'arg0')])
## type-id.h: ns3::TypeId::AttributeInfo::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h: ns3::TypeId::AttributeInfo::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h: ns3::TypeId::AttributeInfo::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h: ns3::TypeId::AttributeInfo::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3UniformVariable_methods(root_module, cls):
## random-variable.h: ns3::UniformVariable::UniformVariable(ns3::UniformVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UniformVariable const &', 'arg0')])
## random-variable.h: ns3::UniformVariable::UniformVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::UniformVariable::UniformVariable(double s, double l) [constructor]
cls.add_constructor([param('double', 's'), param('double', 'l')])
## random-variable.h: uint32_t ns3::UniformVariable::GetInteger(uint32_t s, uint32_t l) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 's'), param('uint32_t', 'l')])
## random-variable.h: double ns3::UniformVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h: double ns3::UniformVariable::GetValue(double s, double l) [member function]
cls.add_method('GetValue',
'double',
[param('double', 's'), param('double', 'l')])
return
def register_Ns3UnsafeAttributeList_methods(root_module, cls):
## attribute-list.h: ns3::UnsafeAttributeList::UnsafeAttributeList() [constructor]
cls.add_constructor([])
## attribute-list.h: ns3::UnsafeAttributeList::UnsafeAttributeList(ns3::UnsafeAttributeList const & o) [copy constructor]
cls.add_constructor([param('ns3::UnsafeAttributeList const &', 'o')])
## attribute-list.h: ns3::AttributeList ns3::UnsafeAttributeList::GetSafe(std::string name) const [member function]
cls.add_method('GetSafe',
'ns3::AttributeList',
[param('std::string', 'name')],
is_const=True)
## attribute-list.h: void ns3::UnsafeAttributeList::Set(std::string name, ns3::AttributeValue const & param) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'param')])
return
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h: ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h: ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h: ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h: ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h: ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h: ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h: ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h: ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h: ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h: ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h: ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
def register_Ns3WeibullVariable_methods(root_module, cls):
## random-variable.h: ns3::WeibullVariable::WeibullVariable(ns3::WeibullVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::WeibullVariable const &', 'arg0')])
## random-variable.h: ns3::WeibullVariable::WeibullVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::WeibullVariable::WeibullVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h: ns3::WeibullVariable::WeibullVariable(double m, double s) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's')])
## random-variable.h: ns3::WeibullVariable::WeibullVariable(double m, double s, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
return
def register_Ns3ZetaVariable_methods(root_module, cls):
## random-variable.h: ns3::ZetaVariable::ZetaVariable(ns3::ZetaVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ZetaVariable const &', 'arg0')])
## random-variable.h: ns3::ZetaVariable::ZetaVariable(double alpha) [constructor]
cls.add_constructor([param('double', 'alpha')])
## random-variable.h: ns3::ZetaVariable::ZetaVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3ZipfVariable_methods(root_module, cls):
## random-variable.h: ns3::ZipfVariable::ZipfVariable(ns3::ZipfVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ZipfVariable const &', 'arg0')])
## random-variable.h: ns3::ZipfVariable::ZipfVariable(long int N, double alpha) [constructor]
cls.add_constructor([param('long int', 'N'), param('double', 'alpha')])
## random-variable.h: ns3::ZipfVariable::ZipfVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h: ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h: ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3ConstantVariable_methods(root_module, cls):
## random-variable.h: ns3::ConstantVariable::ConstantVariable(ns3::ConstantVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConstantVariable const &', 'arg0')])
## random-variable.h: ns3::ConstantVariable::ConstantVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::ConstantVariable::ConstantVariable(double c) [constructor]
cls.add_constructor([param('double', 'c')])
## random-variable.h: void ns3::ConstantVariable::SetConstant(double c) [member function]
cls.add_method('SetConstant',
'void',
[param('double', 'c')])
return
def register_Ns3DeterministicVariable_methods(root_module, cls):
## random-variable.h: ns3::DeterministicVariable::DeterministicVariable(ns3::DeterministicVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DeterministicVariable const &', 'arg0')])
## random-variable.h: ns3::DeterministicVariable::DeterministicVariable(double * d, uint32_t c) [constructor]
cls.add_constructor([param('double *', 'd'), param('uint32_t', 'c')])
return
def register_Ns3EmpiricalVariable_methods(root_module, cls):
## random-variable.h: ns3::EmpiricalVariable::EmpiricalVariable(ns3::EmpiricalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmpiricalVariable const &', 'arg0')])
## random-variable.h: ns3::EmpiricalVariable::EmpiricalVariable() [constructor]
cls.add_constructor([])
## random-variable.h: void ns3::EmpiricalVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
return
def register_Ns3ErlangVariable_methods(root_module, cls):
## random-variable.h: ns3::ErlangVariable::ErlangVariable(ns3::ErlangVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ErlangVariable const &', 'arg0')])
## random-variable.h: ns3::ErlangVariable::ErlangVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::ErlangVariable::ErlangVariable(unsigned int k, double lambda) [constructor]
cls.add_constructor([param('unsigned int', 'k'), param('double', 'lambda')])
## random-variable.h: double ns3::ErlangVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h: double ns3::ErlangVariable::GetValue(unsigned int k, double lambda) const [member function]
cls.add_method('GetValue',
'double',
[param('unsigned int', 'k'), param('double', 'lambda')],
is_const=True)
return
def register_Ns3ExponentialVariable_methods(root_module, cls):
## random-variable.h: ns3::ExponentialVariable::ExponentialVariable(ns3::ExponentialVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ExponentialVariable const &', 'arg0')])
## random-variable.h: ns3::ExponentialVariable::ExponentialVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::ExponentialVariable::ExponentialVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h: ns3::ExponentialVariable::ExponentialVariable(double m, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'b')])
return
def register_Ns3GammaVariable_methods(root_module, cls):
## random-variable.h: ns3::GammaVariable::GammaVariable(ns3::GammaVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::GammaVariable const &', 'arg0')])
## random-variable.h: ns3::GammaVariable::GammaVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::GammaVariable::GammaVariable(double alpha, double beta) [constructor]
cls.add_constructor([param('double', 'alpha'), param('double', 'beta')])
## random-variable.h: double ns3::GammaVariable::GetValue() const [member function]
cls.add_method('GetValue',
'double',
[],
is_const=True)
## random-variable.h: double ns3::GammaVariable::GetValue(double alpha, double beta) const [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')],
is_const=True)
return
def register_Ns3IntEmpiricalVariable_methods(root_module, cls):
## random-variable.h: ns3::IntEmpiricalVariable::IntEmpiricalVariable(ns3::IntEmpiricalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntEmpiricalVariable const &', 'arg0')])
## random-variable.h: ns3::IntEmpiricalVariable::IntEmpiricalVariable() [constructor]
cls.add_constructor([])
return
def register_Ns3LogNormalVariable_methods(root_module, cls):
## random-variable.h: ns3::LogNormalVariable::LogNormalVariable(ns3::LogNormalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::LogNormalVariable const &', 'arg0')])
## random-variable.h: ns3::LogNormalVariable::LogNormalVariable(double mu, double sigma) [constructor]
cls.add_constructor([param('double', 'mu'), param('double', 'sigma')])
return
def register_Ns3NormalVariable_methods(root_module, cls):
## random-variable.h: ns3::NormalVariable::NormalVariable(ns3::NormalVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::NormalVariable const &', 'arg0')])
## random-variable.h: ns3::NormalVariable::NormalVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::NormalVariable::NormalVariable(double m, double v) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'v')])
## random-variable.h: ns3::NormalVariable::NormalVariable(double m, double v, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 'v'), param('double', 'b')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h: ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h: void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h: void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h: ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h: ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h: ns3::Ptr<ns3::Object> ns3::Object::GetObject(ns3::TypeId tid) const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[param('ns3::TypeId', 'tid')],
is_const=True, template_parameters=['ns3::Object'], custom_template_method_name='GetObject')
## object.h: static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h: void ns3::Object::Start() [member function]
cls.add_method('Start',
'void',
[])
## object.h: ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h: void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h: void ns3::Object::DoStart() [member function]
cls.add_method('DoStart',
'void',
[],
visibility='protected', is_virtual=True)
## object.h: void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h: ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h: ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h: bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h: ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3ParetoVariable_methods(root_module, cls):
## random-variable.h: ns3::ParetoVariable::ParetoVariable(ns3::ParetoVariable const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParetoVariable const &', 'arg0')])
## random-variable.h: ns3::ParetoVariable::ParetoVariable() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::ParetoVariable::ParetoVariable(double m) [constructor]
cls.add_constructor([param('double', 'm')])
## random-variable.h: ns3::ParetoVariable::ParetoVariable(double m, double s) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's')])
## random-variable.h: ns3::ParetoVariable::ParetoVariable(double m, double s, double b) [constructor]
cls.add_constructor([param('double', 'm'), param('double', 's'), param('double', 'b')])
## random-variable.h: ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params) [constructor]
cls.add_constructor([param('std::pair< double, double >', 'params')])
## random-variable.h: ns3::ParetoVariable::ParetoVariable(std::pair<double,double> params, double b) [constructor]
cls.add_constructor([param('std::pair< double, double >', 'params'), param('double', 'b')])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3EventImpl_Ns3Empty_Ns3DefaultDeleter__lt__ns3EventImpl__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::SimpleRefCount(ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::EventImpl, ns3::empty, ns3::DefaultDeleter< ns3::EventImpl > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::EventImpl, ns3::empty, ns3::DefaultDeleter<ns3::EventImpl> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3FlowClassifier_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowClassifier__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter< ns3::FlowClassifier > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::FlowClassifier, ns3::empty, ns3::DefaultDeleter<ns3::FlowClassifier> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3FlowProbe_Ns3Empty_Ns3DefaultDeleter__lt__ns3FlowProbe__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >::SimpleRefCount(ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter< ns3::FlowProbe > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::FlowProbe, ns3::empty, ns3::DefaultDeleter<ns3::FlowProbe> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3IdealControlMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3IdealControlMessage__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::IdealControlMessage, ns3::empty, ns3::DefaultDeleter<ns3::IdealControlMessage> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::IdealControlMessage, ns3::empty, ns3::DefaultDeleter<ns3::IdealControlMessage> >::SimpleRefCount(ns3::SimpleRefCount<ns3::IdealControlMessage, ns3::empty, ns3::DefaultDeleter<ns3::IdealControlMessage> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::IdealControlMessage, ns3::empty, ns3::DefaultDeleter< ns3::IdealControlMessage > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::IdealControlMessage, ns3::empty, ns3::DefaultDeleter<ns3::IdealControlMessage> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3InterferenceHelperEvent_Ns3Empty_Ns3DefaultDeleter__lt__ns3InterferenceHelperEvent__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::InterferenceHelper::Event, ns3::empty, ns3::DefaultDeleter<ns3::InterferenceHelper::Event> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::InterferenceHelper::Event, ns3::empty, ns3::DefaultDeleter<ns3::InterferenceHelper::Event> >::SimpleRefCount(ns3::SimpleRefCount<ns3::InterferenceHelper::Event, ns3::empty, ns3::DefaultDeleter<ns3::InterferenceHelper::Event> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::InterferenceHelper::Event, ns3::empty, ns3::DefaultDeleter< ns3::InterferenceHelper::Event > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::InterferenceHelper::Event, ns3::empty, ns3::DefaultDeleter<ns3::InterferenceHelper::Event> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Ipv4MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4MulticastRoute__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4MulticastRoute > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::Ipv4MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4MulticastRoute> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Ipv4Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv4Route__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv4Route > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::Ipv4Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv4Route> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Ipv6MulticastRoute_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv6MulticastRoute__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv6MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6MulticastRoute> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv6MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6MulticastRoute> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv6MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6MulticastRoute> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv6MulticastRoute, ns3::empty, ns3::DefaultDeleter< ns3::Ipv6MulticastRoute > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::Ipv6MulticastRoute, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6MulticastRoute> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Ipv6Route_Ns3Empty_Ns3DefaultDeleter__lt__ns3Ipv6Route__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv6Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6Route> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Ipv6Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6Route> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Ipv6Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6Route> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Ipv6Route, ns3::empty, ns3::DefaultDeleter< ns3::Ipv6Route > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::Ipv6Route, ns3::empty, ns3::DefaultDeleter<ns3::Ipv6Route> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3MeshWifiInterfaceMacPlugin_Ns3Empty_Ns3DefaultDeleter__lt__ns3MeshWifiInterfaceMacPlugin__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::MeshWifiInterfaceMacPlugin, ns3::empty, ns3::DefaultDeleter<ns3::MeshWifiInterfaceMacPlugin> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::MeshWifiInterfaceMacPlugin, ns3::empty, ns3::DefaultDeleter<ns3::MeshWifiInterfaceMacPlugin> >::SimpleRefCount(ns3::SimpleRefCount<ns3::MeshWifiInterfaceMacPlugin, ns3::empty, ns3::DefaultDeleter<ns3::MeshWifiInterfaceMacPlugin> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::MeshWifiInterfaceMacPlugin, ns3::empty, ns3::DefaultDeleter< ns3::MeshWifiInterfaceMacPlugin > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::MeshWifiInterfaceMacPlugin, ns3::empty, ns3::DefaultDeleter<ns3::MeshWifiInterfaceMacPlugin> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3NixVector_Ns3Empty_Ns3DefaultDeleter__lt__ns3NixVector__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::SimpleRefCount(ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::NixVector, ns3::empty, ns3::DefaultDeleter< ns3::NixVector > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::NixVector, ns3::empty, ns3::DefaultDeleter<ns3::NixVector> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3OutputStreamWrapper_Ns3Empty_Ns3DefaultDeleter__lt__ns3OutputStreamWrapper__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::SimpleRefCount(ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter< ns3::OutputStreamWrapper > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::OutputStreamWrapper, ns3::empty, ns3::DefaultDeleter<ns3::OutputStreamWrapper> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Packet_Ns3Empty_Ns3DefaultDeleter__lt__ns3Packet__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Packet, ns3::empty, ns3::DefaultDeleter< ns3::Packet > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::Packet, ns3::empty, ns3::DefaultDeleter<ns3::Packet> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3PbbAddressBlock_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbAddressBlock__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter< ns3::PbbAddressBlock > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::PbbAddressBlock, ns3::empty, ns3::DefaultDeleter<ns3::PbbAddressBlock> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3PbbMessage_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbMessage__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter< ns3::PbbMessage > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::PbbMessage, ns3::empty, ns3::DefaultDeleter<ns3::PbbMessage> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3PbbPacket_Ns3Header_Ns3DefaultDeleter__lt__ns3PbbPacket__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter< ns3::PbbPacket > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::PbbPacket, ns3::Header, ns3::DefaultDeleter<ns3::PbbPacket> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3PbbTlv_Ns3Empty_Ns3DefaultDeleter__lt__ns3PbbTlv__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::SimpleRefCount(ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter< ns3::PbbTlv > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::PbbTlv, ns3::empty, ns3::DefaultDeleter<ns3::PbbTlv> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3RadvdInterface_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdInterface__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> >::SimpleRefCount(ns3::SimpleRefCount<ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter< ns3::RadvdInterface > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::RadvdInterface, ns3::empty, ns3::DefaultDeleter<ns3::RadvdInterface> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3RadvdPrefix_Ns3Empty_Ns3DefaultDeleter__lt__ns3RadvdPrefix__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> >::SimpleRefCount(ns3::SimpleRefCount<ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter< ns3::RadvdPrefix > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::RadvdPrefix, ns3::empty, ns3::DefaultDeleter<ns3::RadvdPrefix> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3RefCountBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3RefCountBase__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter< ns3::RefCountBase > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::RefCountBase, ns3::empty, ns3::DefaultDeleter<ns3::RefCountBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SpectrumConverter_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumConverter__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumConverter> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumConverter> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumConverter> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter< ns3::SpectrumConverter > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::SpectrumConverter, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumConverter> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SpectrumModel_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumModel__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumModel> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumModel> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumModel> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter< ns3::SpectrumModel > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::SpectrumModel, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumModel> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SpectrumValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3SpectrumValue__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter< ns3::SpectrumValue > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::SpectrumValue, ns3::empty, ns3::DefaultDeleter<ns3::SpectrumValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3SystemThread_Ns3Empty_Ns3DefaultDeleter__lt__ns3SystemThread__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::SimpleRefCount(ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter< ns3::SystemThread > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3WifiInformationElement_Ns3Empty_Ns3DefaultDeleter__lt__ns3WifiInformationElement__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::WifiInformationElement, ns3::empty, ns3::DefaultDeleter<ns3::WifiInformationElement> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::WifiInformationElement, ns3::empty, ns3::DefaultDeleter<ns3::WifiInformationElement> >::SimpleRefCount(ns3::SimpleRefCount<ns3::WifiInformationElement, ns3::empty, ns3::DefaultDeleter<ns3::WifiInformationElement> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::WifiInformationElement, ns3::empty, ns3::DefaultDeleter< ns3::WifiInformationElement > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::WifiInformationElement, ns3::empty, ns3::DefaultDeleter<ns3::WifiInformationElement> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Dot11sIeBeaconTimingUnit_Ns3Empty_Ns3DefaultDeleter__lt__ns3Dot11sIeBeaconTimingUnit__gt___methods(root_module, cls):
## simple-ref-count.h: ns3::SimpleRefCount<ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h: ns3::SimpleRefCount<ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> >::SimpleRefCount(ns3::SimpleRefCount<ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter< ns3::dot11s::IeBeaconTimingUnit > > const &', 'o')])
## simple-ref-count.h: static void ns3::SimpleRefCount<ns3::dot11s::IeBeaconTimingUnit, ns3::empty, ns3::DefaultDeleter<ns3::dot11s::IeBeaconTimingUnit> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SystemThread_methods(root_module, cls):
## system-thread.h: ns3::SystemThread::SystemThread(ns3::SystemThread const & arg0) [copy constructor]
cls.add_constructor([param('ns3::SystemThread const &', 'arg0')])
## system-thread.h: ns3::SystemThread::SystemThread(ns3::Callback<void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> callback) [constructor]
cls.add_constructor([param('ns3::Callback< void, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'callback')])
## system-thread.h: bool ns3::SystemThread::Break() [member function]
cls.add_method('Break',
'bool',
[])
## system-thread.h: void ns3::SystemThread::Join() [member function]
cls.add_method('Join',
'void',
[])
## system-thread.h: void ns3::SystemThread::Shutdown() [member function]
cls.add_method('Shutdown',
'void',
[])
## system-thread.h: void ns3::SystemThread::Start() [member function]
cls.add_method('Start',
'void',
[])
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h: ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h: ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h: bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h: bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h: bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h: bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h: ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h: ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h: bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h: ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h: ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h: bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h: ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h: ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h: ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h: bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h: std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3BooleanChecker_methods(root_module, cls):
## boolean.h: ns3::BooleanChecker::BooleanChecker() [constructor]
cls.add_constructor([])
## boolean.h: ns3::BooleanChecker::BooleanChecker(ns3::BooleanChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanChecker const &', 'arg0')])
return
def register_Ns3BooleanValue_methods(root_module, cls):
cls.add_output_stream_operator()
## boolean.h: ns3::BooleanValue::BooleanValue(ns3::BooleanValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::BooleanValue const &', 'arg0')])
## boolean.h: ns3::BooleanValue::BooleanValue() [constructor]
cls.add_constructor([])
## boolean.h: ns3::BooleanValue::BooleanValue(bool value) [constructor]
cls.add_constructor([param('bool', 'value')])
## boolean.h: ns3::Ptr<ns3::AttributeValue> ns3::BooleanValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## boolean.h: bool ns3::BooleanValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## boolean.h: bool ns3::BooleanValue::Get() const [member function]
cls.add_method('Get',
'bool',
[],
is_const=True)
## boolean.h: std::string ns3::BooleanValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## boolean.h: void ns3::BooleanValue::Set(bool value) [member function]
cls.add_method('Set',
'void',
[param('bool', 'value')])
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h: ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h: ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h: ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h: ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h: bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h: ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h: ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h: ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h: ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h: bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h: std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h: void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3DoubleValue_methods(root_module, cls):
## double.h: ns3::DoubleValue::DoubleValue() [constructor]
cls.add_constructor([])
## double.h: ns3::DoubleValue::DoubleValue(ns3::DoubleValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::DoubleValue const &', 'arg0')])
## double.h: ns3::DoubleValue::DoubleValue(double const & value) [constructor]
cls.add_constructor([param('double const &', 'value')])
## double.h: ns3::Ptr<ns3::AttributeValue> ns3::DoubleValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## double.h: bool ns3::DoubleValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## double.h: double ns3::DoubleValue::Get() const [member function]
cls.add_method('Get',
'double',
[],
is_const=True)
## double.h: std::string ns3::DoubleValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## double.h: void ns3::DoubleValue::Set(double const & value) [member function]
cls.add_method('Set',
'void',
[param('double const &', 'value')])
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h: ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h: ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h: ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h: bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h: std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3EnumChecker_methods(root_module, cls):
## enum.h: ns3::EnumChecker::EnumChecker(ns3::EnumChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumChecker const &', 'arg0')])
## enum.h: ns3::EnumChecker::EnumChecker() [constructor]
cls.add_constructor([])
## enum.h: void ns3::EnumChecker::Add(int v, std::string name) [member function]
cls.add_method('Add',
'void',
[param('int', 'v'), param('std::string', 'name')])
## enum.h: void ns3::EnumChecker::AddDefault(int v, std::string name) [member function]
cls.add_method('AddDefault',
'void',
[param('int', 'v'), param('std::string', 'name')])
## enum.h: bool ns3::EnumChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## enum.h: bool ns3::EnumChecker::Copy(ns3::AttributeValue const & src, ns3::AttributeValue & dst) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'src'), param('ns3::AttributeValue &', 'dst')],
is_const=True, is_virtual=True)
## enum.h: ns3::Ptr<ns3::AttributeValue> ns3::EnumChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h: std::string ns3::EnumChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h: std::string ns3::EnumChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_const=True, is_virtual=True)
## enum.h: bool ns3::EnumChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_const=True, is_virtual=True)
return
def register_Ns3EnumValue_methods(root_module, cls):
## enum.h: ns3::EnumValue::EnumValue(ns3::EnumValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EnumValue const &', 'arg0')])
## enum.h: ns3::EnumValue::EnumValue() [constructor]
cls.add_constructor([])
## enum.h: ns3::EnumValue::EnumValue(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## enum.h: ns3::Ptr<ns3::AttributeValue> ns3::EnumValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## enum.h: bool ns3::EnumValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## enum.h: int ns3::EnumValue::Get() const [member function]
cls.add_method('Get',
'int',
[],
is_const=True)
## enum.h: std::string ns3::EnumValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## enum.h: void ns3::EnumValue::Set(int v) [member function]
cls.add_method('Set',
'void',
[param('int', 'v')])
return
def register_Ns3IntegerValue_methods(root_module, cls):
## integer.h: ns3::IntegerValue::IntegerValue() [constructor]
cls.add_constructor([])
## integer.h: ns3::IntegerValue::IntegerValue(ns3::IntegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IntegerValue const &', 'arg0')])
## integer.h: ns3::IntegerValue::IntegerValue(int64_t const & value) [constructor]
cls.add_constructor([param('int64_t const &', 'value')])
## integer.h: ns3::Ptr<ns3::AttributeValue> ns3::IntegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## integer.h: bool ns3::IntegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## integer.h: int64_t ns3::IntegerValue::Get() const [member function]
cls.add_method('Get',
'int64_t',
[],
is_const=True)
## integer.h: std::string ns3::IntegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## integer.h: void ns3::IntegerValue::Set(int64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('int64_t const &', 'value')])
return
def register_Ns3ObjectFactoryChecker_methods(root_module, cls):
## object-factory.h: ns3::ObjectFactoryChecker::ObjectFactoryChecker() [constructor]
cls.add_constructor([])
## object-factory.h: ns3::ObjectFactoryChecker::ObjectFactoryChecker(ns3::ObjectFactoryChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryChecker const &', 'arg0')])
return
def register_Ns3ObjectFactoryValue_methods(root_module, cls):
## object-factory.h: ns3::ObjectFactoryValue::ObjectFactoryValue() [constructor]
cls.add_constructor([])
## object-factory.h: ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactoryValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectFactoryValue const &', 'arg0')])
## object-factory.h: ns3::ObjectFactoryValue::ObjectFactoryValue(ns3::ObjectFactory const & value) [constructor]
cls.add_constructor([param('ns3::ObjectFactory const &', 'value')])
## object-factory.h: ns3::Ptr<ns3::AttributeValue> ns3::ObjectFactoryValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-factory.h: bool ns3::ObjectFactoryValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-factory.h: ns3::ObjectFactory ns3::ObjectFactoryValue::Get() const [member function]
cls.add_method('Get',
'ns3::ObjectFactory',
[],
is_const=True)
## object-factory.h: std::string ns3::ObjectFactoryValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## object-factory.h: void ns3::ObjectFactoryValue::Set(ns3::ObjectFactory const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::ObjectFactory const &', 'value')])
return
def register_Ns3ObjectVectorAccessor_methods(root_module, cls):
## object-vector.h: ns3::ObjectVectorAccessor::ObjectVectorAccessor() [constructor]
cls.add_constructor([])
## object-vector.h: ns3::ObjectVectorAccessor::ObjectVectorAccessor(ns3::ObjectVectorAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectVectorAccessor const &', 'arg0')])
## object-vector.h: bool ns3::ObjectVectorAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & value) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'value')],
is_const=True, is_virtual=True)
## object-vector.h: bool ns3::ObjectVectorAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-vector.h: bool ns3::ObjectVectorAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_const=True, is_virtual=True)
## object-vector.h: bool ns3::ObjectVectorAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object'), param('ns3::AttributeValue const &', 'value')],
is_const=True, is_virtual=True)
## object-vector.h: ns3::Ptr<ns3::Object> ns3::ObjectVectorAccessor::DoGet(ns3::ObjectBase const * object, uint32_t i) const [member function]
cls.add_method('DoGet',
'ns3::Ptr< ns3::Object >',
[param('ns3::ObjectBase const *', 'object'), param('uint32_t', 'i')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## object-vector.h: bool ns3::ObjectVectorAccessor::DoGetN(ns3::ObjectBase const * object, uint32_t * n) const [member function]
cls.add_method('DoGetN',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('uint32_t *', 'n')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ObjectVectorChecker_methods(root_module, cls):
## object-vector.h: ns3::ObjectVectorChecker::ObjectVectorChecker() [constructor]
cls.add_constructor([])
## object-vector.h: ns3::ObjectVectorChecker::ObjectVectorChecker(ns3::ObjectVectorChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectVectorChecker const &', 'arg0')])
## object-vector.h: ns3::TypeId ns3::ObjectVectorChecker::GetItemTypeId() const [member function]
cls.add_method('GetItemTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3ObjectVectorValue_methods(root_module, cls):
## object-vector.h: ns3::ObjectVectorValue::ObjectVectorValue(ns3::ObjectVectorValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectVectorValue const &', 'arg0')])
## object-vector.h: ns3::ObjectVectorValue::ObjectVectorValue() [constructor]
cls.add_constructor([])
## object-vector.h: __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::ObjectVectorValue::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## object-vector.h: ns3::Ptr<ns3::AttributeValue> ns3::ObjectVectorValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## object-vector.h: bool ns3::ObjectVectorValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## object-vector.h: __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::ObjectVectorValue::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## object-vector.h: ns3::Ptr<ns3::Object> ns3::ObjectVectorValue::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')],
is_const=True)
## object-vector.h: uint32_t ns3::ObjectVectorValue::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## object-vector.h: std::string ns3::ObjectVectorValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
return
def register_Ns3PointerChecker_methods(root_module, cls):
## pointer.h: ns3::PointerChecker::PointerChecker() [constructor]
cls.add_constructor([])
## pointer.h: ns3::PointerChecker::PointerChecker(ns3::PointerChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerChecker const &', 'arg0')])
## pointer.h: ns3::TypeId ns3::PointerChecker::GetPointeeTypeId() const [member function]
cls.add_method('GetPointeeTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3PointerValue_methods(root_module, cls):
## pointer.h: ns3::PointerValue::PointerValue(ns3::PointerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PointerValue const &', 'arg0')])
## pointer.h: ns3::PointerValue::PointerValue() [constructor]
cls.add_constructor([])
## pointer.h: ns3::PointerValue::PointerValue(ns3::Ptr<ns3::Object> object) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Object >', 'object')])
## pointer.h: ns3::Ptr<ns3::AttributeValue> ns3::PointerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## pointer.h: bool ns3::PointerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## pointer.h: ns3::Ptr<ns3::Object> ns3::PointerValue::GetObject() const [member function]
cls.add_method('GetObject',
'ns3::Ptr< ns3::Object >',
[],
is_const=True)
## pointer.h: std::string ns3::PointerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## pointer.h: void ns3::PointerValue::SetObject(ns3::Ptr<ns3::Object> object) [member function]
cls.add_method('SetObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'object')])
return
def register_Ns3RandomVariableChecker_methods(root_module, cls):
## random-variable.h: ns3::RandomVariableChecker::RandomVariableChecker() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::RandomVariableChecker::RandomVariableChecker(ns3::RandomVariableChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableChecker const &', 'arg0')])
return
def register_Ns3RandomVariableValue_methods(root_module, cls):
## random-variable.h: ns3::RandomVariableValue::RandomVariableValue() [constructor]
cls.add_constructor([])
## random-variable.h: ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariableValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomVariableValue const &', 'arg0')])
## random-variable.h: ns3::RandomVariableValue::RandomVariableValue(ns3::RandomVariable const & value) [constructor]
cls.add_constructor([param('ns3::RandomVariable const &', 'value')])
## random-variable.h: ns3::Ptr<ns3::AttributeValue> ns3::RandomVariableValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## random-variable.h: bool ns3::RandomVariableValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## random-variable.h: ns3::RandomVariable ns3::RandomVariableValue::Get() const [member function]
cls.add_method('Get',
'ns3::RandomVariable',
[],
is_const=True)
## random-variable.h: std::string ns3::RandomVariableValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## random-variable.h: void ns3::RandomVariableValue::Set(ns3::RandomVariable const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::RandomVariable const &', 'value')])
return
def register_Ns3RefCountBase_methods(root_module, cls):
## ref-count-base.h: ns3::RefCountBase::RefCountBase() [constructor]
cls.add_constructor([])
## ref-count-base.h: ns3::RefCountBase::RefCountBase(ns3::RefCountBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RefCountBase const &', 'arg0')])
return
def register_Ns3StringChecker_methods(root_module, cls):
## string.h: ns3::StringChecker::StringChecker() [constructor]
cls.add_constructor([])
## string.h: ns3::StringChecker::StringChecker(ns3::StringChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::StringChecker const &', 'arg0')])
return
def register_Ns3StringValue_methods(root_module, cls):
## string.h: ns3::StringValue::StringValue() [constructor]
cls.add_constructor([])
## string.h: ns3::StringValue::StringValue(ns3::StringValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::StringValue const &', 'arg0')])
## string.h: ns3::StringValue::StringValue(std::string const & value) [constructor]
cls.add_constructor([param('std::string const &', 'value')])
## string.h: ns3::Ptr<ns3::AttributeValue> ns3::StringValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## string.h: bool ns3::StringValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## string.h: std::string ns3::StringValue::Get() const [member function]
cls.add_method('Get',
'std::string',
[],
is_const=True)
## string.h: std::string ns3::StringValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## string.h: void ns3::StringValue::Set(std::string const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h: ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h: ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h: ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h: ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h: ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h: ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h: bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h: ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h: std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h: void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3UintegerValue_methods(root_module, cls):
## uinteger.h: ns3::UintegerValue::UintegerValue() [constructor]
cls.add_constructor([])
## uinteger.h: ns3::UintegerValue::UintegerValue(ns3::UintegerValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::UintegerValue const &', 'arg0')])
## uinteger.h: ns3::UintegerValue::UintegerValue(uint64_t const & value) [constructor]
cls.add_constructor([param('uint64_t const &', 'value')])
## uinteger.h: ns3::Ptr<ns3::AttributeValue> ns3::UintegerValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## uinteger.h: bool ns3::UintegerValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## uinteger.h: uint64_t ns3::UintegerValue::Get() const [member function]
cls.add_method('Get',
'uint64_t',
[],
is_const=True)
## uinteger.h: std::string ns3::UintegerValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## uinteger.h: void ns3::UintegerValue::Set(uint64_t const & value) [member function]
cls.add_method('Set',
'void',
[param('uint64_t const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h: ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h: ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h: ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h: ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h: ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h: ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h: bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h: ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h: std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h: void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h: ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h: ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h: ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h: ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h: ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h: ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h: bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h: ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h: std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h: void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3ConfigMatchContainer_methods(root_module, cls):
## config.h: ns3::Config::MatchContainer::MatchContainer(ns3::Config::MatchContainer const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Config::MatchContainer const &', 'arg0')])
## config.h: ns3::Config::MatchContainer::MatchContainer() [constructor]
cls.add_constructor([])
## config.h: ns3::Config::MatchContainer::MatchContainer(std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > const & objects, std::vector<std::string, std::allocator<std::string> > const & contexts, std::string path) [constructor]
cls.add_constructor([param('std::vector< ns3::Ptr< ns3::Object > > const &', 'objects'), param('std::vector< std::string > const &', 'contexts'), param('std::string', 'path')])
## config.h: __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::Begin() const [member function]
cls.add_method('Begin',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h: void ns3::Config::MatchContainer::Connect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Connect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h: void ns3::Config::MatchContainer::ConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('ConnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h: void ns3::Config::MatchContainer::Disconnect(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('Disconnect',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h: void ns3::Config::MatchContainer::DisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('DisconnectWithoutContext',
'void',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## config.h: __gnu_cxx::__normal_iterator<const ns3::Ptr<ns3::Object>*,std::vector<ns3::Ptr<ns3::Object>, std::allocator<ns3::Ptr<ns3::Object> > > > ns3::Config::MatchContainer::End() const [member function]
cls.add_method('End',
'__gnu_cxx::__normal_iterator< ns3::Ptr< ns3::Object > const, std::vector< ns3::Ptr< ns3::Object > > >',
[],
is_const=True)
## config.h: ns3::Ptr<ns3::Object> ns3::Config::MatchContainer::Get(uint32_t i) const [member function]
cls.add_method('Get',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')],
is_const=True)
## config.h: std::string ns3::Config::MatchContainer::GetMatchedPath(uint32_t i) const [member function]
cls.add_method('GetMatchedPath',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## config.h: uint32_t ns3::Config::MatchContainer::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## config.h: std::string ns3::Config::MatchContainer::GetPath() const [member function]
cls.add_method('GetPath',
'std::string',
[],
is_const=True)
## config.h: void ns3::Config::MatchContainer::Set(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('Set',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
return
def register_functions(root_module):
module = root_module
## breakpoint.h: extern void ns3::BreakpointFallback() [free function]
module.add_function('BreakpointFallback',
'void',
[])
## vector.h: extern double ns3::CalculateDistance(ns3::Vector2D const & a, ns3::Vector2D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector2D const &', 'a'), param('ns3::Vector2D const &', 'b')])
## vector.h: extern double ns3::CalculateDistance(ns3::Vector3D const & a, ns3::Vector3D const & b) [free function]
module.add_function('CalculateDistance',
'double',
[param('ns3::Vector3D const &', 'a'), param('ns3::Vector3D const &', 'b')])
## ptr.h: extern ns3::Ptr<ns3::ObjectVectorValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::ObjectVectorValue >',
[],
template_parameters=['ns3::ObjectVectorValue'])
## ptr.h: extern ns3::Ptr<ns3::PointerValue> ns3::Create() [free function]
module.add_function('Create',
'ns3::Ptr< ns3::PointerValue >',
[],
template_parameters=['ns3::PointerValue'])
## log.h: extern void ns3::LogComponentDisable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentDisable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h: extern void ns3::LogComponentDisableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentDisableAll',
'void',
[param('ns3::LogLevel', 'level')])
## log.h: extern void ns3::LogComponentEnable(char const * name, ns3::LogLevel level) [free function]
module.add_function('LogComponentEnable',
'void',
[param('char const *', 'name'), param('ns3::LogLevel', 'level')])
## log.h: extern void ns3::LogComponentEnableAll(ns3::LogLevel level) [free function]
module.add_function('LogComponentEnableAll',
'void',
[param('ns3::LogLevel', 'level')])
## boolean.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeBooleanChecker() [free function]
module.add_function('MakeBooleanChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## callback.h: extern ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::MakeBoundCallback(void (*)( ::ns3::Ptr<ns3::OutputStreamWrapper>,::ns3::Ptr<ns3::Packet const> ) * fnPtr, ns3::Ptr<ns3::OutputStreamWrapper> a) [free function]
module.add_function('MakeBoundCallback',
'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[param('void ( * ) ( ns3::Ptr< ns3::OutputStreamWrapper >, ns3::Ptr< ns3::Packet const > ) *', 'fnPtr'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'a')],
template_parameters=['void', 'ns3::Ptr<ns3::OutputStreamWrapper>', 'ns3::Ptr<ns3::OutputStreamWrapper>', 'ns3::Ptr<ns3::Packet const>'])
## callback.h: extern ns3::Callback<void, ns3::Ptr<ns3::Packet const>, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty> ns3::MakeBoundCallback(void (*)( ::ns3::Ptr<ns3::PcapFileWrapper>,::ns3::Ptr<ns3::Packet const> ) * fnPtr, ns3::Ptr<ns3::PcapFileWrapper> a) [free function]
module.add_function('MakeBoundCallback',
'ns3::Callback< void, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[param('void ( * ) ( ns3::Ptr< ns3::PcapFileWrapper >, ns3::Ptr< ns3::Packet const > ) *', 'fnPtr'), param('ns3::Ptr< ns3::PcapFileWrapper >', 'a')],
template_parameters=['void', 'ns3::Ptr<ns3::PcapFileWrapper>', 'ns3::Ptr<ns3::PcapFileWrapper>', 'ns3::Ptr<ns3::Packet const>'])
## callback.h: extern ns3::Callback<void,std::basic_string<char, std::char_traits<char>, std::allocator<char> >,ns3::Ptr<const ns3::Packet>,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::MakeBoundCallback(void (*)( ::ns3::Ptr<ns3::OutputStreamWrapper>,::std::basic_string<char,std::char_traits<char>,std::allocator<char> >,::ns3::Ptr<ns3::Packet const> ) * fnPtr, ns3::Ptr<ns3::OutputStreamWrapper> a) [free function]
module.add_function('MakeBoundCallback',
'ns3::Callback< void, std::basic_string< char, std::char_traits< char >, std::allocator< char > >, ns3::Ptr< ns3::Packet const >, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[param('void ( * ) ( ns3::Ptr< ns3::OutputStreamWrapper >, std::string, ns3::Ptr< ns3::Packet const > ) *', 'fnPtr'), param('ns3::Ptr< ns3::OutputStreamWrapper >', 'a')],
template_parameters=['void', 'ns3::Ptr<ns3::OutputStreamWrapper>', 'ns3::Ptr<ns3::OutputStreamWrapper>', 'std::string', 'ns3::Ptr<ns3::Packet const>'])
## callback.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeCallbackChecker() [free function]
module.add_function('MakeCallbackChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## enum.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeEnumChecker(int v1, std::string n1, int v2=0, std::string n2="", int v3=0, std::string n3="", int v4=0, std::string n4="", int v5=0, std::string n5="", int v6=0, std::string n6="", int v7=0, std::string n7="", int v8=0, std::string n8="", int v9=0, std::string n9="", int v10=0, std::string n10="", int v11=0, std::string n11="", int v12=0, std::string n12="", int v13=0, std::string n13="", int v14=0, std::string n14="", int v15=0, std::string n15="", int v16=0, std::string n16="", int v17=0, std::string n17="", int v18=0, std::string n18="", int v19=0, std::string n19="", int v20=0, std::string n20="", int v21=0, std::string n21="", int v22=0, std::string n22="") [free function]
module.add_function('MakeEnumChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int', 'v1'), param('std::string', 'n1'), param('int', 'v2', default_value='0'), param('std::string', 'n2', default_value='""'), param('int', 'v3', default_value='0'), param('std::string', 'n3', default_value='""'), param('int', 'v4', default_value='0'), param('std::string', 'n4', default_value='""'), param('int', 'v5', default_value='0'), param('std::string', 'n5', default_value='""'), param('int', 'v6', default_value='0'), param('std::string', 'n6', default_value='""'), param('int', 'v7', default_value='0'), param('std::string', 'n7', default_value='""'), param('int', 'v8', default_value='0'), param('std::string', 'n8', default_value='""'), param('int', 'v9', default_value='0'), param('std::string', 'n9', default_value='""'), param('int', 'v10', default_value='0'), param('std::string', 'n10', default_value='""'), param('int', 'v11', default_value='0'), param('std::string', 'n11', default_value='""'), param('int', 'v12', default_value='0'), param('std::string', 'n12', default_value='""'), param('int', 'v13', default_value='0'), param('std::string', 'n13', default_value='""'), param('int', 'v14', default_value='0'), param('std::string', 'n14', default_value='""'), param('int', 'v15', default_value='0'), param('std::string', 'n15', default_value='""'), param('int', 'v16', default_value='0'), param('std::string', 'n16', default_value='""'), param('int', 'v17', default_value='0'), param('std::string', 'n17', default_value='""'), param('int', 'v18', default_value='0'), param('std::string', 'n18', default_value='""'), param('int', 'v19', default_value='0'), param('std::string', 'n19', default_value='""'), param('int', 'v20', default_value='0'), param('std::string', 'n20', default_value='""'), param('int', 'v21', default_value='0'), param('std::string', 'n21', default_value='""'), param('int', 'v22', default_value='0'), param('std::string', 'n22', default_value='""')])
## object-factory.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeObjectFactoryChecker() [free function]
module.add_function('MakeObjectFactoryChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## random-variable.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeRandomVariableChecker() [free function]
module.add_function('MakeRandomVariableChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## string.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeStringChecker() [free function]
module.add_function('MakeStringChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## type-id.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeTypeIdChecker() [free function]
module.add_function('MakeTypeIdChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVector2DChecker() [free function]
module.add_function('MakeVector2DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVector3DChecker() [free function]
module.add_function('MakeVector3DChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## vector.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::MakeVectorChecker() [free function]
module.add_function('MakeVectorChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[])
## test.h: extern bool ns3::TestDoubleIsEqual(double const a, double const b, double const epsilon=std::numeric_limits<double>::epsilon()) [free function]
module.add_function('TestDoubleIsEqual',
'bool',
[param('double const', 'a'), param('double const', 'b'), param('double const', 'epsilon', default_value='std::numeric_limits<double>::epsilon()')])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['double'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['float'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['long'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['int'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['short'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['signed char'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned long'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned int'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned short'])
## type-name.h: extern std::string ns3::TypeNameGet() [free function]
module.add_function('TypeNameGet',
'std::string',
[],
template_parameters=['unsigned char'])
register_functions_ns3_Config(module.get_submodule('Config'), root_module)
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_addressUtils(module.get_submodule('addressUtils'), root_module)
register_functions_ns3_aodv(module.get_submodule('aodv'), root_module)
register_functions_ns3_dot11s(module.get_submodule('dot11s'), root_module)
register_functions_ns3_dsdv(module.get_submodule('dsdv'), root_module)
register_functions_ns3_flame(module.get_submodule('flame'), root_module)
register_functions_ns3_internal(module.get_submodule('internal'), root_module)
register_functions_ns3_olsr(module.get_submodule('olsr'), root_module)
return
def register_functions_ns3_Config(module, root_module):
## config.h: extern void ns3::Config::Connect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Connect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h: extern void ns3::Config::ConnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('ConnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h: extern void ns3::Config::Disconnect(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('Disconnect',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h: extern void ns3::Config::DisconnectWithoutContext(std::string path, ns3::CallbackBase const & cb) [free function]
module.add_function('DisconnectWithoutContext',
'void',
[param('std::string', 'path'), param('ns3::CallbackBase const &', 'cb')])
## config.h: extern ns3::Ptr<ns3::Object> ns3::Config::GetRootNamespaceObject(uint32_t i) [free function]
module.add_function('GetRootNamespaceObject',
'ns3::Ptr< ns3::Object >',
[param('uint32_t', 'i')])
## config.h: extern uint32_t ns3::Config::GetRootNamespaceObjectN() [free function]
module.add_function('GetRootNamespaceObjectN',
'uint32_t',
[])
## config.h: extern ns3::Config::MatchContainer ns3::Config::LookupMatches(std::string path) [free function]
module.add_function('LookupMatches',
'ns3::Config::MatchContainer',
[param('std::string', 'path')])
## config.h: extern void ns3::Config::RegisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('RegisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
## config.h: extern void ns3::Config::Set(std::string path, ns3::AttributeValue const & value) [free function]
module.add_function('Set',
'void',
[param('std::string', 'path'), param('ns3::AttributeValue const &', 'value')])
## config.h: extern void ns3::Config::SetDefault(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefault',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h: extern bool ns3::Config::SetDefaultFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetDefaultFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h: extern void ns3::Config::SetGlobal(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobal',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h: extern bool ns3::Config::SetGlobalFailSafe(std::string name, ns3::AttributeValue const & value) [free function]
module.add_function('SetGlobalFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## config.h: extern void ns3::Config::UnregisterRootNamespaceObject(ns3::Ptr<ns3::Object> obj) [free function]
module.add_function('UnregisterRootNamespaceObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'obj')])
return
def register_functions_ns3_FatalImpl(module, root_module):
## fatal-impl.h: extern void ns3::FatalImpl::FlushStreams() [free function]
module.add_function('FlushStreams',
'void',
[])
## fatal-impl.h: extern void ns3::FatalImpl::RegisterStream(std::ostream * stream) [free function]
module.add_function('RegisterStream',
'void',
[param('std::ostream *', 'stream')])
## fatal-impl.h: extern void ns3::FatalImpl::UnregisterStream(std::ostream * stream) [free function]
module.add_function('UnregisterStream',
'void',
[param('std::ostream *', 'stream')])
return
def register_functions_ns3_addressUtils(module, root_module):
return
def register_functions_ns3_aodv(module, root_module):
return
def register_functions_ns3_dot11s(module, root_module):
return
def register_functions_ns3_dsdv(module, root_module):
return
def register_functions_ns3_flame(module, root_module):
return
def register_functions_ns3_internal(module, root_module):
## double.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeDoubleChecker(double min, double max, std::string name) [free function]
module.add_function('MakeDoubleChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('double', 'min'), param('double', 'max'), param('std::string', 'name')])
## integer.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeIntegerChecker(int64_t min, int64_t max, std::string name) [free function]
module.add_function('MakeIntegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('int64_t', 'min'), param('int64_t', 'max'), param('std::string', 'name')])
## uinteger.h: extern ns3::Ptr<ns3::AttributeChecker const> ns3::internal::MakeUintegerChecker(uint64_t min, uint64_t max, std::string name) [free function]
module.add_function('MakeUintegerChecker',
'ns3::Ptr< ns3::AttributeChecker const >',
[param('uint64_t', 'min'), param('uint64_t', 'max'), param('std::string', 'name')])
return
def register_functions_ns3_olsr(module, root_module):
return
|
gpl-2.0
|
dhanababum/accessdb
|
accessdb/utils.py
|
1
|
8395
|
# -*- coding: utf-8 -*-
# Copyright 2017 Dhana Babu
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import string
import tempfile
import shutil
import pypyodbc as odbc
from .access_api import create
_MS_ACCESS_TYPES = {
'BIT',
'BYTE',
'SHORT',
'LONG',
'CURRENCY',
'SINGLE',
'DOUBLE',
'DATETIME',
'TEXT',
'MEMO',
'PRIMARY', # CUSTOM Type for handling AUTOINCREMENT
}
SCHEMA_FILE = 'schema.ini'
_TEXT_SEPARATORS = {
r',': 'CSVDelimited',
r'\t': 'TabDelimited'
}
def _text_formater(sep):
separator = _TEXT_SEPARATORS.get(sep, 'Delimited({})')
return separator.format(sep)
def _stringify_path(db_path):
dtr, path = os.path.split(db_path)
if dtr == '':
db_path = os.path.join('.', path)
return db_path
def _push_access_db(temp_dir, text_file, data_columns,
header_columns, dtype, path, table_name, sep,
append, overwrite, delete='file'):
table = Table(temp_dir, text_file,
table_name,
data_columns,
header_columns,
dtype, sep, append)
schema_file = os.path.join(temp_dir, SCHEMA_FILE)
try:
with SchemaWriter(temp_dir, text_file, data_columns,
header_columns, dtype, sep, schema_file) as schema:
schema.write()
with AccessDBConnection(path, overwrite) as con:
cursor = con.cursor()
if not append:
cursor.execute(table.create_query())
cursor.execute(table.insert_query())
con.commit()
finally:
if delete == 'folder':
shutil.rmtree(temp_dir)
else:
os.unlink(schema_file)
def _get_random_file():
return ''.join(random.choice(string.ascii_lowercase) for _ in range(10))
class DataTypeNotFound(Exception):
pass
class SchemaWriter(object):
def __init__(self, temp_dir, text_file, df_columns,
columns, dtype, sep, schema_file):
self.temp_dir = temp_dir
self.text_file = text_file
self.df_columns = df_columns
self.columns = columns
self.dtype = dtype
self.sep = sep
self.path = schema_file
def __enter__(self):
self.fp = open(self.path, 'w')
return self
def __exit__(self, *args):
self.fp.close()
def formater(self):
yield '[%s]' % self.text_file
yield 'ColNameHeader=True'
yield 'Format=%s' % _text_formater(self.sep)
self.dcols = {col: ('Col%s' % (i + 1))
for i, col in enumerate(self.df_columns)}
if not isinstance(self.dtype, dict):
self.dtype = {}
for col in self.df_columns:
ctype = self.dtype.get(col, 'text').upper()
if ctype not in _MS_ACCESS_TYPES:
raise DataTypeNotFound(
'Provided Data Type Not Found %s' % ctype)
if ctype == 'PRIMARY':
ctype = 'TEXT'
yield '{c_col}="{d_col}" {c_type}'.format(
c_col=self.dcols[col],
d_col=col,
c_type=ctype.capitalize())
def write(self):
for line in self.formater():
self.fp.write(line)
self.fp.write('\n')
class Table(object):
def __init__(self, temp_dir, text_file,
table_name, df_columns, columns,
dtype, sep, append):
self.temp_dir = temp_dir
self.text_file = text_file
self.df_columns = df_columns
self.table_name = table_name
self.df_columns = df_columns
self.columns = columns
self.dtype = dtype
self.sep = sep
self.append = append
if not isinstance(self.dtype, dict):
self.dtype = {}
def _get_colunm_type(self, col):
ctype = self.dtype.get(col, 'TEXT').upper()
if ctype not in _MS_ACCESS_TYPES:
raise Exception
return ctype
def formater(self):
for col in self.df_columns:
c_type = self._get_colunm_type(col)
if c_type == 'PRIMARY':
c_type = 'AUTOINCREMENT PRIMARY KEY'
if self.columns:
if col not in self.columns:
continue
col = self.columns[col]
yield '`{c_col}` {c_type}'.format(c_col=col,
c_type=c_type)
def insert_formater(self):
for col in self.df_columns:
if self._get_colunm_type(col) == 'PRIMARY':
continue
if not self.columns:
self.columns = dict(zip(self.df_columns, self.df_columns))
if self.columns:
if col not in self.columns:
continue
cus_col = self.columns[col]
yield col, cus_col
def built_columns(self):
return '(%s)' % ','.join(self.formater())
def create_query(self):
return "CREATE TABLE `{table_name}`{columns}".format(
table_name=self.table_name,
columns=self.built_columns())
@staticmethod
def required_columns(cols):
return ','.join('`%s`' % c for c in cols)
def insert_query(self):
custom_columns = []
columns = []
for col1, col2 in self.insert_formater():
columns.append(col1)
custom_columns.append(col2)
return """
INSERT INTO `{table_name}`({columns})
SELECT {required_cols} FROM [TEXT;HDR=YES;FMT={separator};
Database={temp_dir}].{text_file}
""".format(temp_dir=self.temp_dir,
text_file=self.text_file,
columns=self.required_columns(custom_columns),
required_cols=self.required_columns(columns),
table_name=self.table_name,
separator=_text_formater(self.sep))
class AccessDBConnection(object):
def __init__(self, db_path, overwrite):
self.overwrite = overwrite
self.db_path = _stringify_path(db_path)
def __enter__(self):
if not os.path.isfile(self.db_path) or self.overwrite:
create(self.db_path)
odbc_conn_str = '''DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};
DBQ=%s''' % (self.db_path)
self.con = odbc.connect(odbc_conn_str)
return self.con
def __exit__(self, *args):
self.con.close()
def to_accessdb(self, path, table_name,
header_columns=None, dtype='str', engine='text',
sep=',', append=False, overwrite=False):
if self.empty:
return
temp_dir = tempfile.mkdtemp()
text_file = '%s.txt' % _get_random_file()
text_path = os.path.join(temp_dir, text_file)
self.to_csv(text_path, index=False)
_push_access_db(temp_dir, text_file,
self.columns.tolist(),
header_columns, dtype, path, table_name,
sep, append, overwrite, 'folder')
def create_accessdb(path, text_path, table_name,
header_columns=None, dtype='str',
engine='text', sep=',', append=False, overwrite=False):
temp_dir, text_file = os.path.split(os.path.abspath(text_path))
with open(text_path) as fp:
file_columns = fp.readline().strip('\n').split(sep)
_push_access_db(temp_dir, text_file,
file_columns,
header_columns, dtype, path, table_name,
sep, append, overwrite)
|
apache-2.0
|
jimsize/PySolFC
|
pysollib/games/harp.py
|
1
|
13061
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8; -*-
# ---------------------------------------------------------------------------##
#
# Copyright (C) 1998-2003 Markus Franz Xaver Johannes Oberhumer
# Copyright (C) 2003 Mt. Hood Playing Card Co.
# Copyright (C) 2005-2009 Skomoroh
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------##
# imports
# PySol imports
from pysollib.gamedb import registerGame, GameInfo, GI
from pysollib.mfxutil import kwdefault
from pysollib.game import Game
from pysollib.layout import Layout
from pysollib.hint import CautiousDefaultHint
from pysollib.hint import KlondikeType_Hint
from pysollib.games.spider import Spider_RowStack, Spider_SS_Foundation, \
Spider_Hint
from pysollib.util import ACE, KING
from pysollib.stack import \
AC_RowStack, \
BO_RowStack, \
KingAC_RowStack, \
SS_FoundationStack, \
Spider_SS_RowStack, \
StackWrapper, \
WasteStack, \
WasteTalonStack, \
SS_RowStack
# ************************************************************************
# * Double Klondike (Klondike with 2 decks and 9 rows)
# ************************************************************************
class DoubleKlondike(Game):
Layout_Method = staticmethod(Layout.harpLayout)
Foundation_Class = SS_FoundationStack
RowStack_Class = KingAC_RowStack
Hint_Class = KlondikeType_Hint
def createGame(self, max_rounds=-1, num_deal=1, **layout):
# create layout
l, s = Layout(self), self.s
kwdefault(layout, rows=9, waste=1, texts=1, playcards=19)
self.Layout_Method(l, **layout)
self.setSize(l.size[0], l.size[1])
# create stacks
s.talon = WasteTalonStack(l.s.talon.x, l.s.talon.y, self,
max_rounds=max_rounds, num_deal=num_deal)
s.waste = WasteStack(l.s.waste.x, l.s.waste.y, self)
for r in l.s.foundations:
s.foundations.append(
self.Foundation_Class(r.x, r.y, self, suit=r.suit))
for r in l.s.rows:
s.rows.append(self.RowStack_Class(r.x, r.y, self))
# default
l.defaultAll()
# extra
if max_rounds > 1:
anchor = 'nn'
if layout.get("texts"):
anchor = 'nnn'
l.createRoundText(s.talon, anchor)
return l
def startGame(self, flip=0):
for i in range(len(self.s.rows)):
self.s.talon.dealRow(rows=self.s.rows[i+1:], flip=flip, frames=0)
self._startAndDealRowAndCards()
shallHighlightMatch = Game._shallHighlightMatch_AC
# ************************************************************************
# * Double Klondike by Threes
# ************************************************************************
class DoubleKlondikeByThrees(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, num_deal=3)
# ************************************************************************
# * Gargantua (Double Klondike with one redeal)
# * Pantagruel
# ************************************************************************
class Gargantua(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=2)
class Pantagruel(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1)
# ************************************************************************
# * Harp (Double Klondike with 10 non-king rows and no redeal)
# ************************************************************************
class BigHarp(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1, rows=10)
#
# game overrides
#
# no real need to override, but this way the layout
# looks a little bit different
def startGame(self):
for i in range(len(self.s.rows)):
self.s.talon.dealRow(rows=self.s.rows[:i], flip=0, frames=0)
self._startAndDealRowAndCards()
# ************************************************************************
# * Steps (Harp with 7 rows)
# ************************************************************************
class Steps(DoubleKlondike):
RowStack_Class = AC_RowStack
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=2, rows=7)
# ************************************************************************
# * Triple Klondike
# * Triple Klondike by Threes
# * Chinese Klondike
# ************************************************************************
class TripleKlondike(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, rows=13)
class TripleKlondikeByThrees(DoubleKlondike):
def createGame(self):
DoubleKlondike.createGame(self, rows=13, num_deal=3)
class ChineseKlondike(DoubleKlondike):
RowStack_Class = StackWrapper(BO_RowStack, base_rank=KING)
def createGame(self):
DoubleKlondike.createGame(self, rows=12)
# ************************************************************************
# * Lady Jane
# * Inquisitor
# ************************************************************************
class LadyJane(DoubleKlondike):
Hint_Class = Spider_Hint
RowStack_Class = Spider_SS_RowStack
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=2, num_deal=3)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
class Inquisitor(DoubleKlondike):
RowStack_Class = SS_RowStack
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=3, num_deal=3)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_SS
# ************************************************************************
# * Arabella
# ************************************************************************
class Arabella(DoubleKlondike):
Hint_Class = Spider_Hint
RowStack_Class = StackWrapper(Spider_SS_RowStack, base_rank=KING)
def createGame(self):
DoubleKlondike.createGame(self, rows=13, max_rounds=1, playcards=24)
def startGame(self):
DoubleKlondike.startGame(self, flip=1)
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
# ************************************************************************
# * Big Deal
# ************************************************************************
class BigDeal(DoubleKlondike):
RowStack_Class = KingAC_RowStack
def createGame(self, rows=12, max_rounds=2, XOFFSET=0):
l, s = Layout(self), self.s
self.setSize(l.XM+(rows+2)*l.XS, l.YM+8*l.YS)
x, y = l.XM, l.YM
for i in range(rows):
s.rows.append(self.RowStack_Class(x, y, self))
x += l.XS
for i in range(2):
y = l.YM
for j in range(8):
s.foundations.append(
SS_FoundationStack(x, y, self, suit=j % 4))
y += l.YS
x += l.XS
x, y = l.XM, self.height-l.YS
s.talon = WasteTalonStack(x, y, self, max_rounds=max_rounds)
l.createText(s.talon, 'n')
x += l.XS
s.waste = WasteStack(x, y, self)
s.waste.CARD_XOFFSET = XOFFSET
l.createText(s.waste, 'n')
if max_rounds > 1:
l.createRoundText(s.talon, 'nnn')
self.setRegion(s.rows, (-999, -999, l.XM+rows*l.XS-l.CW//2, 999999),
priority=1)
l.defaultStackGroups()
# ************************************************************************
# * Delivery
# ************************************************************************
class Delivery(BigDeal):
Hint_Class = CautiousDefaultHint
RowStack_Class = StackWrapper(SS_RowStack, max_move=1)
def createGame(self):
dx = self.app.images.CARDW//10
BigDeal.createGame(self, rows=12, max_rounds=1, XOFFSET=dx)
shallHighlightMatch = Game._shallHighlightMatch_SS
def startGame(self):
self._startDealNumRows(2)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
# ************************************************************************
# * Double Kingsley
# ************************************************************************
class DoubleKingsley(DoubleKlondike):
Foundation_Class = StackWrapper(SS_FoundationStack, base_rank=KING, dir=-1)
RowStack_Class = StackWrapper(KingAC_RowStack, base_rank=ACE, dir=1)
def createGame(self):
DoubleKlondike.createGame(self, max_rounds=1)
# ************************************************************************
# * Thieves of Egypt
# ************************************************************************
class ThievesOfEgypt(DoubleKlondike):
Layout_Method = staticmethod(Layout.klondikeLayout)
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=2)
def startGame(self):
# rows: 1 3 5 7 9 10 8 6 4 2
row = 0
for i in (0, 2, 4, 6, 8, 9, 7, 5, 3, 1):
for j in range(i):
self.s.talon.dealRow(rows=[self.s.rows[row]], frames=0)
row += 1
self._startAndDealRowAndCards()
# ************************************************************************
# * Brush
# ************************************************************************
class Brush(DoubleKlondike):
Layout_Method = staticmethod(Layout.klondikeLayout)
Foundation_Class = Spider_SS_Foundation
RowStack_Class = Spider_RowStack
Hint_Class = Spider_Hint
def createGame(self):
DoubleKlondike.createGame(self, rows=10, max_rounds=1)
def startGame(self):
self._startDealNumRows(3)
self.s.talon.dealRow()
self.s.talon.dealCards() # deal first card to WasteStack
shallHighlightMatch = Game._shallHighlightMatch_RK
getQuickPlayScore = Game._getSpiderQuickPlayScore
# register the game
registerGame(GameInfo(21, DoubleKlondike, "Double Klondike",
GI.GT_KLONDIKE, 2, -1, GI.SL_BALANCED))
registerGame(GameInfo(28, DoubleKlondikeByThrees, "Double Klondike by Threes",
GI.GT_KLONDIKE, 2, -1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(25, Gargantua, "Gargantua",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(15, BigHarp, "Big Harp",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(51, Steps, "Steps",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(273, TripleKlondike, "Triple Klondike",
GI.GT_KLONDIKE, 3, -1, GI.SL_BALANCED))
registerGame(GameInfo(274, TripleKlondikeByThrees, "Triple Klondike by Threes",
GI.GT_KLONDIKE, 3, -1, GI.SL_MOSTLY_LUCK))
registerGame(GameInfo(495, LadyJane, "Lady Jane",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(496, Inquisitor, "Inquisitor",
GI.GT_KLONDIKE, 2, 2, GI.SL_BALANCED))
registerGame(GameInfo(497, Arabella, "Arabella",
GI.GT_KLONDIKE, 3, 0, GI.SL_BALANCED))
registerGame(GameInfo(545, BigDeal, "Big Deal",
GI.GT_KLONDIKE | GI.GT_ORIGINAL, 4, 1, GI.SL_BALANCED))
registerGame(GameInfo(562, Delivery, "Delivery",
GI.GT_FORTY_THIEVES | GI.GT_ORIGINAL, 4, 0,
GI.SL_BALANCED))
registerGame(GameInfo(590, ChineseKlondike, "Chinese Klondike",
GI.GT_KLONDIKE, 3, -1, GI.SL_BALANCED,
suits=(0, 1, 2)))
registerGame(GameInfo(591, Pantagruel, "Pantagruel",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(668, DoubleKingsley, "Double Kingsley",
GI.GT_KLONDIKE, 2, 0, GI.SL_BALANCED))
registerGame(GameInfo(678, ThievesOfEgypt, "Thieves of Egypt",
GI.GT_KLONDIKE, 2, 1, GI.SL_BALANCED))
registerGame(GameInfo(689, Brush, "Brush",
GI.GT_2DECK_TYPE | GI.GT_ORIGINAL, 2, 0,
GI.SL_MOSTLY_SKILL))
|
gpl-3.0
|
iafan/zing
|
tests/forms/project.py
|
1
|
1228
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
# Copyright (C) Zing contributors.
#
# This file is a part of the Zing project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle_app.forms import ProjectForm
from pootle_project.models import PROJECT_CHECKERS, RESERVED_PROJECT_CODES
@pytest.mark.parametrize('reserved_code', RESERVED_PROJECT_CODES)
@pytest.mark.django_db
def test_clean_code_invalid(reserved_code):
form_data = {
'code': reserved_code,
'checkstyle': PROJECT_CHECKERS.keys()[0],
'fullname': 'Foo',
'source_language': 1,
}
form = ProjectForm(form_data)
assert not form.is_valid()
assert 'code' in form.errors
assert len(form.errors.keys()) == 1
@pytest.mark.django_db
def test_clean_code_blank_invalid():
form_data = {
'code': ' ',
'checkstyle': PROJECT_CHECKERS.keys()[0],
'fullname': 'Foo',
'source_language': 1,
}
form = ProjectForm(form_data)
assert not form.is_valid()
assert 'code' in form.errors
assert len(form.errors.keys()) == 1
|
gpl-3.0
|
etherkit/OpenBeacon2
|
client/linux-arm/venv/lib/python3.6/site-packages/PyInstaller/hooks/hook-gi.repository.GdkPixbuf.py
|
1
|
6760
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2020, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Import hook for PyGObject's "gi.repository.GdkPixbuf" package.
"""
import glob
import os
import subprocess
from PyInstaller.config import CONF
from PyInstaller.compat import (
exec_command_stdout, is_darwin, is_win, is_linux, open_file, which)
from PyInstaller.utils.hooks import (
collect_glib_translations, get_gi_typelibs, get_gi_libdir, logger)
loaders_path = os.path.join('gdk-pixbuf-2.0', '2.10.0', 'loaders')
destpath = "lib/gdk-pixbuf-2.0/2.10.0/loaders"
cachedest = "lib/gdk-pixbuf-2.0/2.10.0"
# If the "gdk-pixbuf-query-loaders" command is not in the current ${PATH}, or
# is not in the GI lib path, GDK and thus GdkPixbuf is unavailable. Return with
# a non-fatal warning.
gdk_pixbuf_query_loaders = None
try:
libdir = get_gi_libdir('GdkPixbuf', '2.0')
except ValueError:
logger.warning(
'"hook-gi.repository.GdkPixbuf" ignored, '
'since GdkPixbuf library not found'
)
libdir = None
if libdir:
# Distributions either package gdk-pixbuf-query-loaders in the GI libs
# directory (not on the path), or on the path with or without a -x64 suffix
# depending on the architecture
cmds = [
os.path.join(libdir, 'gdk-pixbuf-2.0/gdk-pixbuf-query-loaders'),
'gdk-pixbuf-query-loaders-64',
'gdk-pixbuf-query-loaders',
]
for cmd in cmds:
gdk_pixbuf_query_loaders = which(cmd)
if gdk_pixbuf_query_loaders is not None:
break
if gdk_pixbuf_query_loaders is None:
logger.warning(
'"hook-gi.repository.GdkPixbuf" ignored, since '
'"gdk-pixbuf-query-loaders" is not in $PATH or gi lib dir.'
)
# Else, GDK is available. Let's do this.
else:
binaries, datas, hiddenimports = get_gi_typelibs('GdkPixbuf', '2.0')
datas += collect_glib_translations('gdk-pixbuf')
# To add support for a new platform, add a new "elif" branch below with
# the proper is_<platform>() test and glob for finding loaders on that
# platform.
if is_win:
ext = "*.dll"
elif is_darwin or is_linux:
ext = "*.so"
# If loader detection is supported on this platform, bundle all
# detected loaders and an updated loader cache.
if ext:
loader_libs = []
# Bundle all found loaders with this user application.
pattern = os.path.join(libdir, loaders_path, ext)
for f in glob.glob(pattern):
binaries.append((f, destpath))
loader_libs.append(f)
# Sometimes the loaders are stored in a different directory from
# the library (msys2)
if not loader_libs:
pattern = os.path.join(libdir, '..', 'lib', loaders_path, ext)
for f in glob.glob(pattern):
binaries.append((f, destpath))
loader_libs.append(f)
# Filename of the loader cache to be written below.
cachefile = os.path.join(CONF['workpath'], 'loaders.cache')
# Run the "gdk-pixbuf-query-loaders" command and capture its
# standard output providing an updated loader cache; then write
# this output to the loader cache bundled with this frozen
# application.
#
# On OSX we use @executable_path to specify a path relative to the
# generated bundle. However, on non-Windows we need to rewrite the
# loader cache because it isn't relocatable by default. See
# https://bugzilla.gnome.org/show_bug.cgi?id=737523
#
# To make it easier to rewrite, we just always write
# @executable_path, since its significantly easier to find/replace
# at runtime. :)
#
# If we need to rewrite it...
if not is_win:
# To permit string munging, decode the encoded bytes output by
# this command (i.e., enable the "universal_newlines" option).
# Note that:
#
# * Under Python 2.7, "cachedata" will be a decoded "unicode"
# object. * Under Python 3.x, "cachedata" will be a decoded
# "str" object.
#
# On Fedora, the default loaders cache is /usr/lib64, but the
# libdir is actually /lib64. To get around this, we pass the
# path to the loader command, and it will create a cache with
# the right path.
cachedata = exec_command_stdout(gdk_pixbuf_query_loaders,
*loader_libs)
cd = []
prefix = '"' + os.path.join(libdir, 'gdk-pixbuf-2.0', '2.10.0')
plen = len(prefix)
# For each line in the updated loader cache...
for line in cachedata.splitlines():
if line.startswith('#'):
continue
if line.startswith(prefix):
line = '"@executable_path/' + cachedest + line[plen:]
cd.append(line)
# Rejoin these lines in a manner preserving this object's
# "unicode" type under Python 2.
cachedata = u'\n'.join(cd)
# Write the updated loader cache to this file.
with open_file(cachefile, 'w') as fp:
fp.write(cachedata)
# Else, GdkPixbuf will do the right thing on Windows, so no changes
# to the loader cache are required. For efficiency and reliability,
# this command's encoded byte output is written as is without being
# decoded.
else:
with open_file(cachefile, 'wb') as fp:
fp.write(subprocess.check_output(gdk_pixbuf_query_loaders))
# Bundle this loader cache with this frozen application.
datas.append((cachefile, cachedest))
# Else, loader detection is unsupported on this platform.
else:
logger.warning(
'GdkPixbuf loader bundling unsupported on your platform.'
)
|
gpl-3.0
|
cindyyu/kuma
|
vendor/packages/translate/tools/test_podebug.py
|
25
|
6654
|
# -*- coding: utf-8 -*-
from translate.storage import base, po, xliff
from translate.tools import podebug
PO_DOC = """
msgid "This is a %s test, hooray."
msgstr ""
"""
XLIFF_DOC = """<?xml version='1.0' encoding='utf-8'?>
<xliff xmlns="urn:oasis:names:tc:xliff:document:1.1" version="1.1">
<file original="NoName" source-language="en" datatype="plaintext">
<body>
<trans-unit id="office:document-content[0]/office:body[0]/office:text[0]/text:p[0]">
<source>This <g id="0">is a</g> test <x id="1" xid="office:document-content[0]/office:body[0]/office:text[0]/text:p[0]/text:note[0]"/>, hooray.</source>
</trans-unit>
</body>
</file>
</xliff>
"""
class TestPODebug:
debug = podebug.podebug()
def setup_method(self, method):
self.postore = po.pofile(PO_DOC)
self.xliffstore = xliff.xlifffile(XLIFF_DOC)
def test_ignore_gtk(self):
"""Test operation of GTK message ignoring"""
unit = base.TranslationUnit("default:LTR")
assert self.debug.ignore_gtk(unit)
def test_keep_target(self):
"""Test that we use the target for rewriting if it exists."""
unit = base.TranslationUnit(u"blie")
unit.target = u"bla"
debugger = podebug.podebug(rewritestyle="xxx")
unit = debugger.convertunit(unit, "")
assert unit.target == u"xxxblaxxx"
unit.target = u"d%d"
debugger = podebug.podebug(rewritestyle="flipped")
unit = debugger.convertunit(unit, "")
assert unit.target == u"\u202ep%d"
def test_rewrite_blank(self):
"""Test the blank rewrite function"""
assert str(self.debug.rewrite_blank(u"Test")) == u""
def test_rewrite_en(self):
"""Test the en rewrite function"""
assert str(self.debug.rewrite_en(u"Test")) == u"Test"
def test_rewrite_xxx(self):
"""Test the xxx rewrite function"""
assert str(self.debug.rewrite_xxx(u"Test")) == u"xxxTestxxx"
assert str(self.debug.rewrite_xxx(u"Newline\n")) == u"xxxNewlinexxx\n"
def test_rewrite_bracket(self):
"""Test the bracket rewrite function"""
assert str(self.debug.rewrite_bracket(u"Test")) == u"[Test]"
assert str(self.debug.rewrite_bracket(u"Newline\n")) == u"[Newline]\n"
def test_rewrite_unicode(self):
"""Test the unicode rewrite function"""
assert unicode(self.debug.rewrite_unicode(u"Test")) == u"Ŧḗşŧ"
def test_rewrite_flipped(self):
"""Test the unicode rewrite function"""
assert unicode(self.debug.rewrite_flipped(u"Test")) == u"\u202e⊥ǝsʇ"
# alternative with reversed string and no RTL override:
#assert unicode(self.debug.rewrite_flipped("Test")) == u"ʇsǝ⊥"
# Chars < ! and > z are returned as is
assert unicode(self.debug.rewrite_flipped(u" ")) == u"\u202e "
assert unicode(self.debug.rewrite_flipped(u"©")) == u"\u202e©"
def test_rewrite_chef(self):
"""Test the chef rewrite function
This is not realy critical to test but a simple tests ensures
that it stays working.
"""
assert str(self.debug.rewrite_chef(u"Mock Swedish test you muppet")) == u"Mock Swedish test yooo mooppet"
def test_po_variables(self):
debug = podebug.podebug(rewritestyle='unicode')
po_out = debug.convertstore(self.postore)
in_unit = self.postore.units[0]
out_unit = po_out.units[0]
assert in_unit.source == out_unit.source
print(out_unit.target)
print(str(po_out))
rewrite_func = self.debug.rewrite_unicode
assert out_unit.target == u"%s%%s%s" % (rewrite_func(u'This is a '), rewrite_func(u' test, hooray.'))
def test_xliff_rewrite(self):
debug = podebug.podebug(rewritestyle='xxx')
xliff_out = debug.convertstore(self.xliffstore)
in_unit = self.xliffstore.units[0]
out_unit = xliff_out.units[0]
assert in_unit.source == out_unit.source
print(out_unit.target)
print(str(xliff_out))
assert out_unit.target == u'xxx%sxxx' % (in_unit.source)
def test_hash(self):
po_docs = ("""
msgid "Test msgid 1"
msgstr "Test msgstr 1"
""",
"""
msgctxt "test context"
msgid "Test msgid 2"
msgstr "Test msgstr 2"
""",
"""
# Test comment 3
msgctxt "test context 3"
msgid "Test msgid 3"
msgstr "Test msgstr 3"
""")
debugs = (podebug.podebug(format="%h "),
podebug.podebug(format="%6h."),
podebug.podebug(format="zzz%7h.zzz"),
podebug.podebug(format="%f %F %b %B %d %s "),
podebug.podebug(format="%3f %4F %5b %6B %7d %8s "),
podebug.podebug(format="%cf %cF %cb %cB %cd %cs "),
podebug.podebug(format="%3cf %4cF %5cb %6cB %7cd %8cs "),)
results = ["85a9 Test msgstr 1", "a15d Test msgstr 2", "6398 Test msgstr 3",
"85a917.Test msgstr 1", "a15d71.Test msgstr 2", "639898.Test msgstr 3",
"zzz85a9170.zzzTest msgstr 1", "zzza15d718.zzzTest msgstr 2", "zzz639898c.zzzTest msgstr 3",
"fullpath/to/fakefile fullpath/to/fakefile.po fakefile fakefile.po fullpath/to full-t-fake Test msgstr 1",
"fullpath/to/fakefile fullpath/to/fakefile.po fakefile fakefile.po fullpath/to full-t-fake Test msgstr 2",
"fullpath/to/fakefile fullpath/to/fakefile.po fakefile fakefile.po fullpath/to full-t-fake Test msgstr 3",
"ful full fakef fakefi fullpat full-t-f Test msgstr 1",
"ful full fakef fakefi fullpat full-t-f Test msgstr 2",
"ful full fakef fakefi fullpat full-t-f Test msgstr 3",
"fllpth/t/fkfl fllpth/t/fkfl.p fkfl fkfl.p fllpth/t fll-t-fk Test msgstr 1",
"fllpth/t/fkfl fllpth/t/fkfl.p fkfl fkfl.p fllpth/t fll-t-fk Test msgstr 2",
"fllpth/t/fkfl fllpth/t/fkfl.p fkfl fkfl.p fllpth/t fll-t-fk Test msgstr 3",
"fll fllp fkfl fkfl.p fllpth/ fll-t-fk Test msgstr 1",
"fll fllp fkfl fkfl.p fllpth/ fll-t-fk Test msgstr 2",
"fll fllp fkfl fkfl.p fllpth/ fll-t-fk Test msgstr 3"]
for debug in debugs:
for po_doc in po_docs:
postore = po.pofile(po_doc)
postore.filename = "fullpath/to/fakefile.po"
po_out = debug.convertstore(postore)
in_unit = postore.units[0]
out_unit = po_out.units[0]
assert in_unit.source == out_unit.source
assert out_unit.target == results.pop(0)
|
mpl-2.0
|
JenSte/pyqtgraph
|
examples/optics/pyoptic.py
|
18
|
18439
|
# -*- coding: utf-8 -*-
import pyqtgraph as pg
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import csv, gzip, os
from pyqtgraph import Point
class GlassDB:
"""
Database of dispersion coefficients for Schott glasses
+ Corning 7980
"""
def __init__(self, fileName='schott_glasses.csv'):
path = os.path.dirname(__file__)
fh = gzip.open(os.path.join(path, 'schott_glasses.csv.gz'), 'rb')
r = csv.reader(map(str, fh.readlines()))
lines = [x for x in r]
self.data = {}
header = lines[0]
for l in lines[1:]:
info = {}
for i in range(1, len(l)):
info[header[i]] = l[i]
self.data[l[0]] = info
self.data['Corning7980'] = { ## Thorlabs UV fused silica--not in schott catalog.
'B1': 0.68374049400,
'B2': 0.42032361300,
'B3': 0.58502748000,
'C1': 0.00460352869,
'C2': 0.01339688560,
'C3': 64.49327320000,
'TAUI25/250': 0.95, ## transmission data is fabricated, but close.
'TAUI25/1400': 0.98,
}
for k in self.data:
self.data[k]['ior_cache'] = {}
def ior(self, glass, wl):
"""
Return the index of refraction for *glass* at wavelength *wl*.
The *glass* argument must be a key in self.data.
"""
info = self.data[glass]
cache = info['ior_cache']
if wl not in cache:
B = list(map(float, [info['B1'], info['B2'], info['B3']]))
C = list(map(float, [info['C1'], info['C2'], info['C3']]))
w2 = (wl/1000.)**2
n = np.sqrt(1.0 + (B[0]*w2 / (w2-C[0])) + (B[1]*w2 / (w2-C[1])) + (B[2]*w2 / (w2-C[2])))
cache[wl] = n
return cache[wl]
def transmissionCurve(self, glass):
data = self.data[glass]
keys = [int(x[7:]) for x in data.keys() if 'TAUI25' in x]
keys.sort()
curve = np.empty((2,len(keys)))
for i in range(len(keys)):
curve[0][i] = keys[i]
key = 'TAUI25/%d' % keys[i]
val = data[key]
if val == '':
val = 0
else:
val = float(val)
curve[1][i] = val
return curve
GLASSDB = GlassDB()
def wlPen(wl):
"""Return a pen representing the given wavelength"""
l1 = 400
l2 = 700
hue = np.clip(((l2-l1) - (wl-l1)) * 0.8 / (l2-l1), 0, 0.8)
val = 1.0
if wl > 700:
val = 1.0 * (((700-wl)/700.) + 1)
elif wl < 400:
val = wl * 1.0/400.
#print hue, val
color = pg.hsvColor(hue, 1.0, val)
pen = pg.mkPen(color)
return pen
class ParamObj:
# Just a helper for tracking parameters and responding to changes
def __init__(self):
self.__params = {}
def __setitem__(self, item, val):
self.setParam(item, val)
def setParam(self, param, val):
self.setParams(**{param:val})
def setParams(self, **params):
"""Set parameters for this optic. This is a good function to override for subclasses."""
self.__params.update(params)
self.paramStateChanged()
def paramStateChanged(self):
pass
def __getitem__(self, item):
return self.getParam(item)
def getParam(self, param):
return self.__params[param]
class Optic(pg.GraphicsObject, ParamObj):
sigStateChanged = QtCore.Signal()
def __init__(self, gitem, **params):
ParamObj.__init__(self)
pg.GraphicsObject.__init__(self) #, [0,0], [1,1])
self.gitem = gitem
self.surfaces = gitem.surfaces
gitem.setParentItem(self)
self.roi = pg.ROI([0,0], [1,1])
self.roi.addRotateHandle([1, 1], [0.5, 0.5])
self.roi.setParentItem(self)
defaults = {
'pos': Point(0,0),
'angle': 0,
}
defaults.update(params)
self._ior_cache = {}
self.roi.sigRegionChanged.connect(self.roiChanged)
self.setParams(**defaults)
def updateTransform(self):
self.resetTransform()
self.setPos(0, 0)
self.translate(Point(self['pos']))
self.rotate(self['angle'])
def setParam(self, param, val):
ParamObj.setParam(self, param, val)
def paramStateChanged(self):
"""Some parameters of the optic have changed."""
# Move graphics item
self.gitem.setPos(Point(self['pos']))
self.gitem.resetTransform()
self.gitem.rotate(self['angle'])
# Move ROI to match
try:
self.roi.sigRegionChanged.disconnect(self.roiChanged)
br = self.gitem.boundingRect()
o = self.gitem.mapToParent(br.topLeft())
self.roi.setAngle(self['angle'])
self.roi.setPos(o)
self.roi.setSize([br.width(), br.height()])
finally:
self.roi.sigRegionChanged.connect(self.roiChanged)
self.sigStateChanged.emit()
def roiChanged(self, *args):
pos = self.roi.pos()
# rotate gitem temporarily so we can decide where it will need to move
self.gitem.resetTransform()
self.gitem.rotate(self.roi.angle())
br = self.gitem.boundingRect()
o1 = self.gitem.mapToParent(br.topLeft())
self.setParams(angle=self.roi.angle(), pos=pos + (self.gitem.pos() - o1))
def boundingRect(self):
return QtCore.QRectF()
def paint(self, p, *args):
pass
def ior(self, wavelength):
return GLASSDB.ior(self['glass'], wavelength)
class Lens(Optic):
def __init__(self, **params):
defaults = {
'dia': 25.4, ## diameter of lens
'r1': 50., ## positive means convex, use 0 for planar
'r2': 0, ## negative means convex
'd': 4.0,
'glass': 'N-BK7',
'reflect': False,
}
defaults.update(params)
d = defaults.pop('d')
defaults['x1'] = -d/2.
defaults['x2'] = d/2.
gitem = CircularSolid(brush=(100, 100, 130, 100), **defaults)
Optic.__init__(self, gitem, **defaults)
def propagateRay(self, ray):
"""Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays"""
"""
NOTE:: We can probably use this to compute refractions faster: (from GLSL 120 docs)
For the incident vector I and surface normal N, and the
ratio of indices of refraction eta, return the refraction
vector. The result is computed by
k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I))
if (k < 0.0)
return genType(0.0)
else
return eta * I - (eta * dot(N, I) + sqrt(k)) * N
The input parameters for the incident vector I and the
surface normal N must already be normalized to get the
desired results. eta == ratio of IORs
For reflection:
For the incident vector I and surface orientation N,
returns the reflection direction:
I – 2 ∗ dot(N, I) ∗ N
N must already be normalized in order to achieve the
desired result.
"""
iors = [self.ior(ray['wl']), 1.0]
for i in [0,1]:
surface = self.surfaces[i]
ior = iors[i]
p1, ai = surface.intersectRay(ray)
#print "surface intersection:", p1, ai*180/3.14159
#trans = self.sceneTransform().inverted()[0] * surface.sceneTransform()
#p1 = trans.map(p1)
if p1 is None:
ray.setEnd(None)
break
p1 = surface.mapToItem(ray, p1)
#print "adjusted position:", p1
#ior = self.ior(ray['wl'])
rd = ray['dir']
a1 = np.arctan2(rd[1], rd[0])
ar = a1 - ai + np.arcsin((np.sin(ai) * ray['ior'] / ior))
#print [x for x in [a1, ai, (np.sin(ai) * ray['ior'] / ior), ar]]
#print ai, np.sin(ai), ray['ior'], ior
ray.setEnd(p1)
dp = Point(np.cos(ar), np.sin(ar))
#p2 = p1+dp
#p1p = self.mapToScene(p1)
#p2p = self.mapToScene(p2)
#dpp = Point(p2p-p1p)
ray = Ray(parent=ray, ior=ior, dir=dp)
return [ray]
class Mirror(Optic):
def __init__(self, **params):
defaults = {
'r1': 0,
'r2': 0,
'd': 0.01,
}
defaults.update(params)
d = defaults.pop('d')
defaults['x1'] = -d/2.
defaults['x2'] = d/2.
gitem = CircularSolid(brush=(100,100,100,255), **defaults)
Optic.__init__(self, gitem, **defaults)
def propagateRay(self, ray):
"""Refract, reflect, absorb, and/or scatter ray. This function may create and return new rays"""
surface = self.surfaces[0]
p1, ai = surface.intersectRay(ray)
if p1 is not None:
p1 = surface.mapToItem(ray, p1)
rd = ray['dir']
a1 = np.arctan2(rd[1], rd[0])
ar = a1 + np.pi - 2*ai
ray.setEnd(p1)
dp = Point(np.cos(ar), np.sin(ar))
ray = Ray(parent=ray, dir=dp)
else:
ray.setEnd(None)
return [ray]
class CircularSolid(pg.GraphicsObject, ParamObj):
"""GraphicsObject with two circular or flat surfaces."""
def __init__(self, pen=None, brush=None, **opts):
"""
Arguments for each surface are:
x1,x2 - position of center of _physical surface_
r1,r2 - radius of curvature
d1,d2 - diameter of optic
"""
defaults = dict(x1=-2, r1=100, d1=25.4, x2=2, r2=100, d2=25.4)
defaults.update(opts)
ParamObj.__init__(self)
self.surfaces = [CircleSurface(defaults['r1'], defaults['d1']), CircleSurface(-defaults['r2'], defaults['d2'])]
pg.GraphicsObject.__init__(self)
for s in self.surfaces:
s.setParentItem(self)
if pen is None:
self.pen = pg.mkPen((220,220,255,200), width=1, cosmetic=True)
else:
self.pen = pg.mkPen(pen)
if brush is None:
self.brush = pg.mkBrush((230, 230, 255, 30))
else:
self.brush = pg.mkBrush(brush)
self.setParams(**defaults)
def paramStateChanged(self):
self.updateSurfaces()
def updateSurfaces(self):
self.surfaces[0].setParams(self['r1'], self['d1'])
self.surfaces[1].setParams(-self['r2'], self['d2'])
self.surfaces[0].setPos(self['x1'], 0)
self.surfaces[1].setPos(self['x2'], 0)
self.path = QtGui.QPainterPath()
self.path.connectPath(self.surfaces[0].path.translated(self.surfaces[0].pos()))
self.path.connectPath(self.surfaces[1].path.translated(self.surfaces[1].pos()).toReversed())
self.path.closeSubpath()
def boundingRect(self):
return self.path.boundingRect()
def shape(self):
return self.path
def paint(self, p, *args):
p.setRenderHints(p.renderHints() | p.Antialiasing)
p.setPen(self.pen)
p.fillPath(self.path, self.brush)
p.drawPath(self.path)
class CircleSurface(pg.GraphicsObject):
def __init__(self, radius=None, diameter=None):
"""center of physical surface is at 0,0
radius is the radius of the surface. If radius is None, the surface is flat.
diameter is of the optic's edge."""
pg.GraphicsObject.__init__(self)
self.r = radius
self.d = diameter
self.mkPath()
def setParams(self, r, d):
self.r = r
self.d = d
self.mkPath()
def mkPath(self):
self.prepareGeometryChange()
r = self.r
d = self.d
h2 = d/2.
self.path = QtGui.QPainterPath()
if r == 0: ## flat surface
self.path.moveTo(0, h2)
self.path.lineTo(0, -h2)
else:
## half-height of surface can't be larger than radius
h2 = min(h2, abs(r))
#dx = abs(r) - (abs(r)**2 - abs(h2)**2)**0.5
#p.moveTo(-d*w/2.+ d*dx, d*h2)
arc = QtCore.QRectF(0, -r, r*2, r*2)
#self.surfaces.append((arc.center(), r, h2))
a1 = np.arcsin(h2/r) * 180. / np.pi
a2 = -2*a1
a1 += 180.
self.path.arcMoveTo(arc, a1)
self.path.arcTo(arc, a1, a2)
#if d == -1:
#p1 = QtGui.QPainterPath()
#p1.addRect(arc)
#self.paths.append(p1)
self.h2 = h2
def boundingRect(self):
return self.path.boundingRect()
def paint(self, p, *args):
return ## usually we let the optic draw.
#p.setPen(pg.mkPen('r'))
#p.drawPath(self.path)
def intersectRay(self, ray):
## return the point of intersection and the angle of incidence
#print "intersect ray"
h = self.h2
r = self.r
p, dir = ray.currentState(relativeTo=self) # position and angle of ray in local coords.
#print " ray: ", p, dir
p = p - Point(r, 0) ## move position so center of circle is at 0,0
#print " adj: ", p, r
if r == 0:
#print " flat"
if dir[0] == 0:
y = 0
else:
y = p[1] - p[0] * dir[1]/dir[0]
if abs(y) > h:
return None, None
else:
return (Point(0, y), np.arctan2(dir[1], dir[0]))
else:
#print " curve"
## find intersection of circle and line (quadratic formula)
dx = dir[0]
dy = dir[1]
dr = (dx**2 + dy**2) ** 0.5
D = p[0] * (p[1]+dy) - (p[0]+dx) * p[1]
idr2 = 1.0 / dr**2
disc = r**2 * dr**2 - D**2
if disc < 0:
return None, None
disc2 = disc**0.5
if dy < 0:
sgn = -1
else:
sgn = 1
br = self.path.boundingRect()
x1 = (D*dy + sgn*dx*disc2) * idr2
y1 = (-D*dx + abs(dy)*disc2) * idr2
if br.contains(x1+r, y1):
pt = Point(x1, y1)
else:
x2 = (D*dy - sgn*dx*disc2) * idr2
y2 = (-D*dx - abs(dy)*disc2) * idr2
pt = Point(x2, y2)
if not br.contains(x2+r, y2):
return None, None
raise Exception("No intersection!")
norm = np.arctan2(pt[1], pt[0])
if r < 0:
norm += np.pi
#print " norm:", norm*180/3.1415
dp = p - pt
#print " dp:", dp
ang = np.arctan2(dp[1], dp[0])
#print " ang:", ang*180/3.1415
#print " ai:", (ang-norm)*180/3.1415
#print " intersection:", pt
return pt + Point(r, 0), ang-norm
class Ray(pg.GraphicsObject, ParamObj):
"""Represents a single straight segment of a ray"""
sigStateChanged = QtCore.Signal()
def __init__(self, **params):
ParamObj.__init__(self)
defaults = {
'ior': 1.0,
'wl': 500,
'end': None,
'dir': Point(1,0),
}
self.params = {}
pg.GraphicsObject.__init__(self)
self.children = []
parent = params.get('parent', None)
if parent is not None:
defaults['start'] = parent['end']
defaults['wl'] = parent['wl']
self['ior'] = parent['ior']
self['dir'] = parent['dir']
parent.addChild(self)
defaults.update(params)
defaults['dir'] = Point(defaults['dir'])
self.setParams(**defaults)
self.mkPath()
def clearChildren(self):
for c in self.children:
c.clearChildren()
c.setParentItem(None)
self.scene().removeItem(c)
self.children = []
def paramStateChanged(self):
pass
def addChild(self, ch):
self.children.append(ch)
ch.setParentItem(self)
def currentState(self, relativeTo=None):
pos = self['start']
dir = self['dir']
if relativeTo is None:
return pos, dir
else:
trans = self.itemTransform(relativeTo)[0]
p1 = trans.map(pos)
p2 = trans.map(pos + dir)
return Point(p1), Point(p2-p1)
def setEnd(self, end):
self['end'] = end
self.mkPath()
def boundingRect(self):
return self.path.boundingRect()
def paint(self, p, *args):
#p.setPen(pg.mkPen((255,0,0, 150)))
p.setRenderHints(p.renderHints() | p.Antialiasing)
p.setCompositionMode(p.CompositionMode_Plus)
p.setPen(wlPen(self['wl']))
p.drawPath(self.path)
def mkPath(self):
self.prepareGeometryChange()
self.path = QtGui.QPainterPath()
self.path.moveTo(self['start'])
if self['end'] is not None:
self.path.lineTo(self['end'])
else:
self.path.lineTo(self['start']+500*self['dir'])
def trace(rays, optics):
if len(optics) < 1 or len(rays) < 1:
return
for r in rays:
r.clearChildren()
o = optics[0]
r2 = o.propagateRay(r)
trace(r2, optics[1:])
class Tracer(QtCore.QObject):
"""
Simple ray tracer.
Initialize with a list of rays and optics;
calling trace() will cause rays to be extended by propagating them through
each optic in sequence.
"""
def __init__(self, rays, optics):
QtCore.QObject.__init__(self)
self.optics = optics
self.rays = rays
for o in self.optics:
o.sigStateChanged.connect(self.trace)
self.trace()
def trace(self):
trace(self.rays, self.optics)
|
mit
|
xyzz/vcmi-build
|
project/jni/python/src/Demo/scripts/newslist.py
|
32
|
11360
|
#! /usr/bin/env python
#######################################################################
# Newslist $Revision: 66429 $
#
# Syntax:
# newslist [ -a ]
#
# This is a program to create a directory full of HTML pages
# which between them contain links to all the newsgroups available
# on your server.
#
# The -a option causes a complete list of all groups to be read from
# the server rather than just the ones which have appeared since last
# execution. This recreates the local list from scratch. Use this on
# the first invocation of the program, and from time to time thereafter.
# When new groups are first created they may appear on your server as
# empty groups. By default, empty groups are ignored by the -a option.
# However, these new groups will not be created again, and so will not
# appear in the server's list of 'new groups' at a later date. Hence it
# won't appear until you do a '-a' after some articles have appeared.
#
# I should really keep a list of ignored empty groups and re-check them
# for articles on every run, but I haven't got around to it yet.
#
# This assumes an NNTP news feed.
#
# Feel free to copy, distribute and modify this code for
# non-commercial use. If you make any useful modifications, let me
# know!
#
# (c) Quentin Stafford-Fraser 1994
# fraser@europarc.xerox.com qs101@cl.cam.ac.uk
# #
#######################################################################
import sys,nntplib, string, marshal, time, os, posix, string
#######################################################################
# Check these variables before running! #
# Top directory.
# Filenames which don't start with / are taken as being relative to this.
topdir='/anfs/qsbigdisc/web/html/newspage'
# The name of your NNTP host
# eg.
# newshost = 'nntp-serv.cl.cam.ac.uk'
# or use following to get the name from the NNTPSERVER environment
# variable:
# newshost = posix.environ['NNTPSERVER']
newshost = 'nntp-serv.cl.cam.ac.uk'
# The filename for a local cache of the newsgroup list
treefile = 'grouptree'
# The filename for descriptions of newsgroups
# I found a suitable one at ftp.uu.net in /uunet-info/newgroups.gz
# You can set this to '' if you don't wish to use one.
descfile = 'newsgroups'
# The directory in which HTML pages should be created
# eg.
# pagedir = '/usr/local/lib/html/newspage'
# pagedir = 'pages'
pagedir = topdir
# The html prefix which will refer to this directory
# eg.
# httppref = '/newspage/',
# or leave blank for relative links between pages: (Recommended)
# httppref = ''
httppref = ''
# The name of the 'root' news page in this directory.
# A .html suffix will be added.
rootpage = 'root'
# Set skipempty to 0 if you wish to see links to empty groups as well.
# Only affects the -a option.
skipempty = 1
# pagelinkicon can contain html to put an icon after links to
# further pages. This helps to make important links stand out.
# Set to '' if not wanted, or '...' is quite a good one.
pagelinkicon='... <img src="http://pelican.cl.cam.ac.uk/icons/page.xbm"> '
# ---------------------------------------------------------------------
# Less important personal preferences:
# Sublistsize controls the maximum number of items the will appear as
# an indented sub-list before the whole thing is moved onto a different
# page. The smaller this is, the more pages you will have, but the
# shorter each will be.
sublistsize = 4
# That should be all. #
#######################################################################
for dir in os.curdir, os.environ['HOME']:
rcfile = os.path.join(dir, '.newslistrc.py')
if os.path.exists(rcfile):
print rcfile
execfile(rcfile)
break
from nntplib import NNTP
from stat import *
rcsrev = '$Revision: 66429 $'
rcsrev = string.join(filter(lambda s: '$' not in s, string.split(rcsrev)))
desc = {}
# Make (possibly) relative filenames into absolute ones
treefile = os.path.join(topdir,treefile)
descfile = os.path.join(topdir,descfile)
page = os.path.join(topdir,pagedir)
# First the bits for creating trees ---------------------------
# Addtotree creates/augments a tree from a list of group names
def addtotree(tree, groups):
print 'Updating tree...'
for i in groups:
parts = string.splitfields(i,'.')
makeleaf(tree, parts)
# Makeleaf makes a leaf and the branch leading to it if necessary
def makeleaf(tree,path):
j = path[0]
l = len(path)
if not tree.has_key(j):
tree[j] = {}
if l == 1:
tree[j]['.'] = '.'
if l > 1:
makeleaf(tree[j],path[1:])
# Then the bits for outputting trees as pages ----------------
# Createpage creates an HTML file named <root>.html containing links
# to those groups beginning with <root>.
def createpage(root, tree, p):
filename = os.path.join(pagedir,root+'.html')
if root == rootpage:
detail = ''
else:
detail = ' under ' + root
f = open(filename,'w')
# f.write('Content-Type: text/html\n')
f.write('<TITLE>Newsgroups available' + detail + '</TITLE>\n')
f.write('<H1>Newsgroups available' + detail +'</H1>\n')
f.write('<A HREF="'+httppref+rootpage+'.html">Back to top level</A><P>\n')
printtree(f,tree,0,p)
f.write('<I>This page automatically created by \'newslist\' v. '+rcsrev+'.')
f.write(time.ctime(time.time()) + '</I><P>')
f.close()
# Printtree prints the groups as a bulleted list. Groups with
# more than <sublistsize> subgroups will be put on a separate page.
# Other sets of subgroups are just indented.
def printtree(f, tree, indent, p):
global desc
l = len(tree)
if l > sublistsize and indent>0:
# Create a new page and a link to it
f.write('<LI><B><A HREF="'+httppref+p[1:]+'.html">')
f.write(p[1:]+'.*')
f.write('</A></B>'+pagelinkicon+'\n')
createpage(p[1:], tree, p)
return
kl = tree.keys()
if l > 1:
kl.sort()
if indent > 0:
# Create a sub-list
f.write('<LI>'+p[1:]+'\n<UL>')
else:
# Create a main list
f.write('<UL>')
indent = indent + 1
for i in kl:
if i == '.':
# Output a newsgroup
f.write('<LI><A HREF="news:' + p[1:] + '">'+ p[1:] + '</A> ')
if desc.has_key(p[1:]):
f.write(' <I>'+desc[p[1:]]+'</I>\n')
else:
f.write('\n')
else:
# Output a hierarchy
printtree(f,tree[i], indent, p+'.'+i)
if l > 1:
f.write('\n</UL>')
# Reading descriptions file ---------------------------------------
# This returns an array mapping group name to its description
def readdesc(descfile):
global desc
desc = {}
if descfile == '':
return
try:
d = open(descfile, 'r')
print 'Reading descriptions...'
except (IOError):
print 'Failed to open description file ' + descfile
return
l = d.readline()
while l != '':
bits = string.split(l)
try:
grp = bits[0]
dsc = string.join(bits[1:])
if len(dsc)>1:
desc[grp] = dsc
except (IndexError):
pass
l = d.readline()
# Check that ouput directory exists, ------------------------------
# and offer to create it if not
def checkopdir(pagedir):
if not os.path.isdir(pagedir):
print 'Directory '+pagedir+' does not exist.'
print 'Shall I create it for you? (y/n)'
if sys.stdin.readline()[0] == 'y':
try:
os.mkdir(pagedir,0777)
except:
print 'Sorry - failed!'
sys.exit(1)
else:
print 'OK. Exiting.'
sys.exit(1)
# Read and write current local tree ----------------------------------
def readlocallist(treefile):
print 'Reading current local group list...'
tree = {}
try:
treetime = time.localtime(os.stat(treefile)[ST_MTIME])
except:
print '\n*** Failed to open local group cache '+treefile
print 'If this is the first time you have run newslist, then'
print 'use the -a option to create it.'
sys.exit(1)
treedate = '%02d%02d%02d' % (treetime[0] % 100 ,treetime[1], treetime[2])
try:
dump = open(treefile,'r')
tree = marshal.load(dump)
dump.close()
except (IOError):
print 'Cannot open local group list ' + treefile
return (tree, treedate)
def writelocallist(treefile, tree):
try:
dump = open(treefile,'w')
groups = marshal.dump(tree,dump)
dump.close()
print 'Saved list to '+treefile+'\n'
except:
print 'Sorry - failed to write to local group cache '+treefile
print 'Does it (or its directory) have the correct permissions?'
sys.exit(1)
# Return list of all groups on server -----------------------------
def getallgroups(server):
print 'Getting list of all groups...'
treedate='010101'
info = server.list()[1]
groups = []
print 'Processing...'
if skipempty:
print '\nIgnoring following empty groups:'
for i in info:
grpname = string.split(i[0])[0]
if skipempty and string.atoi(i[1]) < string.atoi(i[2]):
print grpname+' ',
else:
groups.append(grpname)
print '\n'
if skipempty:
print '(End of empty groups)'
return groups
# Return list of new groups on server -----------------------------
def getnewgroups(server, treedate):
print 'Getting list of new groups since start of '+treedate+'...',
info = server.newgroups(treedate,'000001')[1]
print 'got %d.' % len(info)
print 'Processing...',
groups = []
for i in info:
grpname = string.split(i)[0]
groups.append(grpname)
print 'Done'
return groups
# Now the main program --------------------------------------------
def main():
global desc
tree={}
# Check that the output directory exists
checkopdir(pagedir)
try:
print 'Connecting to '+newshost+'...'
if sys.version[0] == '0':
s = NNTP.init(newshost)
else:
s = NNTP(newshost)
connected = 1
except (nntplib.error_temp, nntplib.error_perm), x:
print 'Error connecting to host:', x
print 'I\'ll try to use just the local list.'
connected = 0
# If -a is specified, read the full list of groups from server
if connected and len(sys.argv) > 1 and sys.argv[1] == '-a':
groups = getallgroups(s)
# Otherwise just read the local file and then add
# groups created since local file last modified.
else:
(tree, treedate) = readlocallist(treefile)
if connected:
groups = getnewgroups(s, treedate)
if connected:
addtotree(tree, groups)
writelocallist(treefile,tree)
# Read group descriptions
readdesc(descfile)
print 'Creating pages...'
createpage(rootpage, tree, '')
print 'Done'
if __name__ == "__main__":
main()
# That's all folks
######################################################################
|
lgpl-2.1
|
javachengwc/hue
|
desktop/core/ext-py/guppy-0.1.10/guppy/etc/RE.py
|
37
|
18932
|
#._cv_part guppy.etc.RE
from guppy.etc.RE_Rect import chooserects
from guppy.etc.IterPermute import iterpermute
class InfiniteError(Exception):
pass
class WordsMemo:
def __init__(self, re, ch):
self.re = re
self.ch = ch
self.xs = {}
self.N = 0
def get_words_of_length(self, N):
# Return a list of words of length up to N
if N not in self.xs:
self.xs[N] = self.re.get_words_of_length_memoized(N, self)
return self.xs[N]
def get_words_of_length_upto(self, N):
# Return all words of length up to N, in the form
# [(0, <list of words of length 0>),
# (1, <list of words of length 0>),
# ...]
xsu = []
for i in range(N+1):
xs = self.get_words_of_length(i)
if xs:
xsu.append((i, xs))
return xsu
REBASE = tuple
class RE(REBASE):
# Regular expression nodes
# The operators are choosen to be compatible with Pythonic standards:
# o sets : using | for union
# o strings, sequences : using + for concatenation.
#
# This differs from mathematical presentations of regular
# expressions where + is the union, but it seemed more important
# to not confuse the Python usage.
# There are also operators for closure x*, x+ that can not be
# represented directly in Python expressions and these were choosen
# to use a function call syntax.
# The following table summarizes the operators.
# RE node expr re lib mathematical name
# x + y x y x y Concatenation
# x | y x | y x + y Union
# x('*') x* x* Kleene closure
# x('+') x+ x+ Positive closure
# x('?') x?
_re_special = r'.^$*+?{}\[]|()'
def __add__(a, b):
if isinstance(b, RE):
return concat(a, b)
else:
return Concatenation(a, Single(b))
def __call__(a, *args, **kwds):
if not kwds:
if args == ('*',):
return KleeneClosure(a)
elif args == ('+',):
return PositiveClosure(a)
elif args == ('?',):
return EpsilonOrOne(a)
raise ValueError, "Argument to regular expression must be '*' or '+' or '?'"
def __eq__(a, b):
return (a._name == b._name and
tuple(a) == tuple(b))
def __lt__(a, b):
if a._name == b._name:
return tuple(a) < tuple(b)
else:
return a._name < b._name
def __or__(a, b):
return Union(a, b)
def get_num_closures(self):
ns = 0
for ch in self:
ns += ch.get_num_closures()
return ns
def get_num_syms(self):
ns = 0
for ch in self:
ns += ch.get_num_syms()
return ns
def get_sum_sym_lengths(self):
ns = 0
for ch in self:
ns += ch.get_sum_sym_lengths()
return ns
def get_words_memo(self):
ch = [x.get_words_memo() for x in self]
return WordsMemo(self, ch)
def get_words_of_length(self, N):
xs = self.get_words_memo()
return xs.get_words_of_length(N)
def mapchildren(self, f):
return self.__class__(*[f(x) for x in self])
def regexpform(self):
return self.mappedrepr(regexpname)
def reversed(self):
return self.mapchildren(lambda x:x.reversed())
def rempretup(self):
def f(x):
if isinstance(x, Seq):
if x is not Epsilon and isinstance(x[0], tuple):
ws = x[1:]
return Seq(*ws)
else:
return x
return x.mapchildren(f)
return f(self)
def seqatoms(self):
sa = []
self.apseqatoms(sa.append)
return sa
def sequni(self):
d = {}
us = []
def ap(x):
if x not in d:
d[x] = 1
us.append(x)
self.apseq(ap)
return Union(*us)
def shform(self, conc = ' '):
r = self.mappedrepr(regexpname)
if conc != ' ':
r = conc.join(r.split(' '))
return r
def simplified(self, *a, **k):
return self
def simulform(self):
def f(x):
if x == '':
return '()'
return str(x)
return self.mappedrepr(f)
def regexpname(s):
if s == '':
return '()'
special = RE._re_special
ren = []
for c in str(s):
if c in special+"', ":
#c = r'\%s'%c
c = ''
ren.append(c)
return ''.join(ren)
def re_compare(a, b):
return a.__cmp__(b)
class Seq(RE):
_priority = 0
_name = 'Seq'
def __new__(clas, *symbols):
if not symbols:
return Epsilon
return REBASE.__new__(clas, symbols)
def __repr__(self):
return '%s(%s)'%(self.__class__.__name__, ', '.join(['%r'%(x,) for x in self]))
def __hash__(self):
return hash(repr(self))
def apseq(self, ap):
ap(self)
def apseqatoms(self, ap):
for x in self:
ap(Single(x))
def get_num_closures(self):
return 0
def get_num_syms(self):
return len(self)
def get_sum_sym_lengths(self):
s = 0
for x in self:
s += len(str(x))
return s
def get_words_memo(self):
return WordsMemo(self, ())
def get_words_of_length_memoized(self, N, memo):
if N == len(self):
return [self]
else:
return []
def limited(self, N):
return self
def mappedrepr(self, f):
if not self:
return f('')
return ' '.join(['%s'%(f(x),) for x in self])
def reversed(self):
r = list(self)
r.reverse()
return self.__class__(*r)
def unionsplitted(self):
return [self]
def Single(symbol):
return REBASE.__new__(Seq, (symbol,))
Epsilon = REBASE.__new__(Seq, ())
def concat(*args):
args = [x for x in args if x is not Epsilon]
if len(args) < 2:
if not args:
return Epsilon
return args[0]
return REBASE.__new__(Concatenation, args)
class Concatenation(RE):
_priority = 2
_name = 'Concat'
def __new__(clas, *args):
#assert Epsilon not in args
if len(args) < 2:
if not args:
return Epsilon
return args[0]
return REBASE.__new__(clas, args)
def __repr__(self):
rs = []
for ch in self:
r = '%r'%(ch,)
if ch._priority > self._priority:
r = '(%s)'%(r,)
rs.append(r)
return ' + '.join(rs)
def apseq(self, ap):
uns = [x.sequni() for x in self]
ixs = [0]*len(uns)
while 1:
xs = []
for (i, us) in enumerate(uns):
for x in us[ixs[i]]:
if x is not Epsilon:
xs.append(x)
ap(Seq(*xs))
j = 0
for j, ix in enumerate(ixs):
ix += 1
if ix >= len(uns[j]):
ix = 0
ixs[j] = ix
if ix != 0:
break
else:
break
def apseqatoms(self, ap):
for x in self:
x.apseqatoms(ap)
def get_words_of_length_memoized(self, N, memo):
chxs = []
for ch in memo.ch:
chxs.append(ch.get_words_of_length_upto(N))
xs = []
seen = {}
def ads(xx, i, n):
if i == len(chxs):
if n == N:
for toconc in iterpermute(*xx):
conc = simple_Concatenation(toconc)
if conc not in seen:
xs.append(conc)
seen[conc] = 1
else:
for m, x in chxs[i]:
if n + m <= N:
ads(xx + [x], i + 1, n + m)
ads([], 0, 0)
return xs
def limited(self, N):
return Concatenation(*[x.limited(N) for x in self])
def mappedrepr(self, f):
rs = []
for ch in self:
r = ch.mappedrepr(f)
if ch._priority > self._priority:
r = '(%s)'%(r,)
rs.append(r)
return ' '.join(rs)
def reversed(self):
r = [x.reversed() for x in self]
r.reverse()
return self.__class__(*r)
def simplified(self, *a, **k):
conc = [x.simplified(*a, **k) for x in self]
sa = []
for c in conc:
for a in c.seqatoms():
sa.append(a)
return simple_Concatenation(sa)
def unionsplitted(self):
runs = []
uns = []
for (i, x) in enumerate(self):
us = x.unionsplitted()
if len(us) > 1:
uns.append((i, us))
if not uns:
return [self]
ixs = [0]*len(uns)
ch = list(self)
while 1:
xs = []
i0 = 0
for j, (i, us) in enumerate(uns):
xs.extend(ch[i0:i])
ix = ixs[j]
xs.append(us[ix])
i0 = i + 1
xs.extend(ch[i0:])
runs.append( concat(*xs) )
j = 0
for j, ix in enumerate(ixs):
ix += 1
if ix >= len(uns[j][1]):
ix = 0
ixs[j] = ix
if ix != 0:
break
else:
return runs
class SimplifiedConcatenation(Concatenation):
def simplified(self, *a, **k):
# pdb.set_trace()
return self
def conclosure(conc):
# Simplification noted Mar 5 2005
# Simplify ... b b* ... or ... b* b ... to ... b+ ...
# conc is a sequence of regular expressions
seen = {}
nconc = []
w0 = None
for w in conc:
if w0 is not None:
if (w._name == '*' and # Not isinstance(KleeneClosure), would catch PositiveClosure
w[0] == w0):
w = PositiveClosure(w0)
elif (w0._name == '*' and
w0[0] == w):
w = PositiveClosure(w)
else:
if w0 is not None:
nconc.append(w0)
w0 = w
if w0 is not None:
nconc.append(w0)
return nconc
def simple_Concatenation(conc):
if len(conc) > 1:
conc0 = conc
conc = conclosure(conc)
nconc = []
i = 0
j = 0
while i < len(conc):
e = conc[i]
if not isinstance(e, Seq):
i += 1
nconc.append(e)
continue
j = i
while j < len(conc):
if not isinstance(conc[j], Seq):
break
j += 1
if j == i + 1:
nconc.append(e)
else:
syms = []
for k in range(i, j):
e = conc[k]
syms.extend(list(e))
nconc.append(Seq(*syms))
i = j
if len(nconc) > 1:
return Concatenation(*nconc)
elif nconc:
return nconc[0]
else:
return Epsilon
gauges = [
lambda x:x.get_num_syms(),
lambda x:x.get_num_closures(),
lambda x:x.get_sum_sym_lengths()
]
def simpleunion(lines, trace=''):
choosen = chooserects(lines, gauges, trace)
have_epsilon = 0
while 1:
if len(choosen) == 1 and (choosen[0].width == 0 or len(choosen[0].lines) == 1):
us = []
for line in choosen[0].lines:
if line:
us.append(line)
else:
have_epsilon = 1
break
us = []
for r in choosen:
conc = r.get_common_part()
olines = r.get_uncommons()
u = simpleunion(olines)
if u is not Epsilon:
if r.dir == -1:
conc = [u]+conc
else:
conc = conc + [u]
if conc:
us.append(conc)
else:
have_epsilon = 1
assert not isinstance(us[-1], str)
choosen = chooserects(us, gauges, trace)
if len(us) > 1:
nus = [simple_Concatenation(line) for line in us]
u = SimplifiedUnion(*nus)
elif us:
u = simple_Concatenation(us[0])
else:
u = None
if have_epsilon:
if u is not None:
u = simple_EpsilonOrOne(u)
else:
u = Epsilon
return u
class Union(RE):
_priority = 3
_name = 'Union'
def __new__(clas, *args):
return REBASE.__new__(clas, args)
def __repr__(self):
rs = []
for ch in self:
r = '%r'%(ch,)
if ch._priority > self._priority:
r = '(%s)'%r
rs.append(r)
return ' | '.join(rs)
def apseq(self, ap):
for c in self:
c.apseq(ap)
def apseqatoms(self, ap):
for x in self:
x.apseqatoms(ap)
def get_words_of_length_memoized(self, N, memo):
xs = []
seen = {}
for ch in memo.ch:
for x in ch.get_words_of_length(N):
if x not in seen:
seen[x] = 1
xs.append(x)
return xs
def limited(self, N):
uni = [x.limited(N) for x in self]
for i, x in enumerate(uni):
if x is not self[i]:
return self.__class__(*uni)
return self
def mappedrepr(self, f):
rs = []
for ch in self:
r = '%s'%(ch.mappedrepr(f),)
if ch._priority > self._priority:
r = '(%s)'%r
rs.append(r)
return ' | '.join(rs)
def simplified(self, args=None, trace='', *a, **k):
if args is None:
args = [x.simplified() for x in self.unionsplitted()]
#args = [x for x in self.unionsplitted()]
# Create a simplfied union
# Assuming args are simplified, non-unions
ch = [a.seqatoms() for a in args]
return simpleunion(ch, trace)
def unionsplitted(self):
us = []
for x in self:
us.extend(list(x.unionsplitted()))
return us
class SimplifiedUnion(Union):
def simplified(self, *a, **k):
return self
class Called(RE):
_priority = 1
def __new__(clas, arg):
return REBASE.__new__(clas, (arg,))
def __repr__(self):
ch = self[0]
r = '%r'%(ch,)
if ch._priority > self._priority:
r = '(%s)'%r
return "%s(%r)"%(r, self._name)
def apseqatoms(self, ap):
ap(self)
def get_num_closures(self):
return 1 + self[0].get_num_closures()
def mappedrepr(self, f):
ch = self[0]
r = ch.mappedrepr(f)
if (ch._priority > self._priority
or isinstance(ch, Seq) and len(ch) > 1):
r = '(%s)'%r
return "%s%s"%(r, self._name)
def simplified(self, *a, **k):
return self.__class__(self[0].simplified(*a, **k))
class Closure(Called):
def get_words_of_length_memoized(self, N, memo):
if N == 0:
return [Epsilon]
if N == 1:
return memo.ch[0].get_words_of_length(1)
xs = []
seen = {}
for i in range(1, N):
a = memo.get_words_of_length(i)
b = memo.get_words_of_length(N-i)
for ai in a:
for bi in b:
aibi = simple_Concatenation((ai, bi))
if aibi not in seen:
xs.append(aibi)
seen[aibi] = 1
for x in memo.ch[0].get_words_of_length(N):
if x not in seen:
xs.append(x)
seen[x] = 1
return xs
def unionsplitted(self):
return [self]
class KleeneClosure(Closure):
_name = '*'
def apseq(self, ap):
raise InfiniteError, 'apseq: Regular expression is infinite: contains a Kleene Closure'
def limited(self, N):
if N == 0:
return Epsilon
cl = self[0].limited(N)
uni = []
for i in range(N+1):
toconc = [cl]*i
uni.append(Concatenation(*toconc))
return Union(*uni)
def simplified(self, *a, **k):
return simple_KleeneClosure(self[0].simplified(*a, **k))
def simple_KleeneClosure(x):
# (b+)* -> b*
if x._name == '+':
return simple_KleeneClosure(x[0])
return KleeneClosure(x)
class PositiveClosure(Closure):
_name = '+'
def apseq(self, ap):
raise InfiniteError, 'apseq: Regular expression is infinite: contains a Positive Closure'
def apseqatoms(self, ap):
self[0].apseqatoms(ap)
simple_KleeneClosure(self[0]).apseqatoms(ap)
def get_words_of_length_memoized(self, N, memo):
if N <= 1:
return memo.ch[0].get_words_of_length(N)
return Closure.get_words_of_length_memoized(self, N, memo)
def limited(self, N):
a = self[0].limited(N)
b = KleeneClosure(self[0]).limited(N)
return Concatenation(a, b)
class EpsilonOrOne(Called):
_name = '?'
def apseq(self, ap):
ap(Epsilon)
self[0].apseq(ap)
def get_words_of_length_memoized(self, N, memo):
if N == 0:
return [Epsilon]
return memo.ch[0].get_words_of_length(N)
def limited(self, N):
x = self[0].limited(N)
if x is not self[0]:
self = self.__class__(x)
return self
def simplified(self, *a, **k):
return simple_EpsilonOrOne(self[0].simplified(*a, **k))
def unionsplitted(self):
return [Epsilon] + list(self[0].unionsplitted())
def simple_EpsilonOrOne(x):
# (a+)? -> a*
if x._name == '+':
return simple_KleeneClosure(x)
# (a*)? -> a*
if x._name == '*':
return x
return EpsilonOrOne(x)
class RegularSystem:
def __init__(self, table, Start, final_states):
self.table = table
self.Start = Start
self.Final = '358f0eca5c34bacdfbf6a8ac0ccf84bc'
self.final_states = final_states
def pp(self):
def statename(state):
try:
name = self.names[state]
except KeyError:
name = str(state)
return name
def transname(trans):
name = trans.simulform()
if trans._priority > 1:
name = '(%s)'%(name,)
return name
self.setup_names()
X = self.X
xs = [self.Start]+self.order
xs.append(self.Final)
for Xk in xs:
if Xk not in X:
continue
print '%3s = '%(statename(Xk),),
Tk = X[Xk]
es = []
for Xj in xs:
if Xj in Tk:
es.append('%s %s'%(transname(Tk[Xj]), statename(Xj)))
if es:
print ' | '.join(es)
else:
print
def setup_equations(self):
table = self.table
final_states = self.final_states
Final = self.Final
self.X = X = {Final:{}}
for Xi, transitions in table.items():
X[Xi] = Ti = {}
for (symbol, Xj) in transitions.items():
Ti.setdefault(Xj, []).append(Single(symbol))
for Xj, Aij in Ti.items():
if len(Aij) > 1:
Aij.sort()
Aij = Union(*Aij)
else:
Aij = Aij[0]
Ti[Xj] = Aij
if Xi in final_states:
Ti[Final] = Epsilon
def setup_order(self):
def dists(X, start):
i = 0
S = {start:i}
news = [start]
while news:
oldnews = news
news = []
i += 1
for s in oldnews:
if s not in X:
continue
for t in X[s]:
if t not in S:
news.append(t)
S[t] = i
return S
def start_distance(x):
return start_dists[x]
def sumt(f):
memo = {}
def g(x):
if x in memo:
return memo[x]
s = 0.0
for y in X[x]:
s += f(y)
memo[x] = s
return s
return g
def cmp3(x, y):
# Comparison for the sorting of equation solving order
# First in list = solved last
if x is y:
return 0
c = cmp(len(X[y]), len(X[x])) # Equations with more terms are resolved later
if c:
return c
# The equations with terms more distant from start node will be resolved earlier
i = 0
while i < 10: # 4 was enough with tests so far at Feb 24 2005
try:
f = sumdists[i]
except:
f = sumt(sumdists[i-1])
sumdists.append(f)
c = cmp(f(x), f(y))
if c:
return c
i += 1
#pdb.set_trace()
return cmp(x, y)
sumdists = [start_distance]
X = self.X
Start = self.Start
Final = self.Final
start_dists = dists(X, Start)
order = [x for x in start_dists if x is not Start and x is not Final]
order.sort(cmp3)
self.order = order
def setup_names(self):
try:
self.order
except AttributeError:
self.setup_order()
self.names = {}
self.names[self.Start] = 'X0'
for i, s in enumerate(self.order):
self.names[s] = 'X%d'%(i+1)
self.names[self.Final] = 'Final'
def solve(self):
# Set up equation system
self.setup_equations()
self.setup_order()
X = self.X
Start = self.Start
Final = self.Final
todo = list(self.order)
# Solve equation system
while todo:
Xk = todo.pop()
Tk = X[Xk]
if Xk in Tk:
# Recursive equation
# Eliminate Akk Xk, using Adler's theorem
# Given:
# Xk = Ak0 X0 | ... Akk Xk |.. Akn Xkn
# we get:
# Xk = Akk* (Ak0 X0 | ... <no Xk> ... | Akn Xn)
# which we evaluate to:
# Xk = Bk0 X0 | ... Bkn Xn
# where coefficients get the new values
# Bki := Akk* Aki
Akk = Tk[Xk]
del Tk[Xk]
AkkStar = Akk('*')
for Xi, Aki in Tk.items():
Bki = AkkStar + Aki
Tk[Xi] = Bki
# Substitute Xk in each other equation in X
# containing Xk, except eqv. Xk itself, which will not be used any more..
del X[Xk]
for Xj, Tj in X.items():
Bjk = Tj.get(Xk)
if Bjk is None:
continue
del Tj[Xk]
for Xji, Tk_Xji in Tk.items():
Cji = (Bjk + Tk_Xji)
Bji = Tj.get(Xji)
if Bji is not None:
Cji = Bji | Cji
Tj[Xji] = Cji
# The equation system is now solved
# The result is in Final term of Start equation
return X[Start][Final]
Nothing = Union()
def SolveFSA(fsa):
RS = RegularSystem(fsa.table, fsa.start_state, fsa.final_states)
return RS.solve()
|
apache-2.0
|
foromer4/scrapy
|
scrapy/utils/benchserver.py
|
38
|
1332
|
import random
from six.moves.urllib.parse import urlencode
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import reactor
class Root(Resource):
isLeaf = True
def getChild(self, name, request):
return self
def render(self, request):
total = _getarg(request, b'total', 100, int)
show = _getarg(request, b'show', 10, int)
nlist = [random.randint(1, total) for _ in range(show)]
request.write(b"<html><head></head><body>")
args = request.args.copy()
for nl in nlist:
args['n'] = nl
argstr = urlencode(args, doseq=True)
request.write("<a href='/follow?{0}'>follow {1}</a><br>"
.format(argstr, nl).encode('utf8'))
request.write(b"</body></html>")
return b''
def _getarg(request, name, default=None, type=str):
return type(request.args[name][0]) \
if name in request.args else default
if __name__ == '__main__':
root = Root()
factory = Site(root)
httpPort = reactor.listenTCP(8998, Site(root))
def _print_listening():
httpHost = httpPort.getHost()
print("Bench server at http://{}:{}".format(httpHost.host, httpHost.port))
reactor.callWhenRunning(_print_listening)
reactor.run()
|
bsd-3-clause
|
macauleycheng/AOS_OF_Example
|
04-VxLAN/04-AccessToNetworkPortFloodByMcast/edit_config.py
|
4
|
8282
|
import pkg_resources
pkg_resources.require("ncclient==0.4.3")
from ncclient import manager
import ncclient
#due to ofconfig design problem, it need fill port feature
#but we won't use it currently.
#of-agent nexthop 2 destination user-input-dst-mac ethernet 1/2 vid 2
config_nexthop_ucast_xml="""
<config>
<of11-config:capable-switch xmlns:of11-config="urn:onf:of111:config:yang">
<ofdpa10:next-hop xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:id>2</ofdpa10:id>
<ofdpa10:dest-mac>user-input-dst-mac</ofdpa10:dest-mac>
<ofdpa10:phy-port>2</ofdpa10:phy-port>
<ofdpa10:vid>2</ofdpa10:vid>
</ofdpa10:next-hop>
</of11-config:capable-switch>
</config>
"""
#of-agent nexthop 20 destination 01-00-5e-01-01-01 ethernet 1/2 vid 2
config_nexthop_mcast_xml="""
<config>
<of11-config:capable-switch xmlns:of11-config="urn:onf:of111:config:yang">
<ofdpa10:next-hop xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:id>20</ofdpa10:id>
<ofdpa10:dest-mac>01:00:5E:01:01:01</ofdpa10:dest-mac>
<ofdpa10:phy-port>2</ofdpa10:phy-port>
<ofdpa10:vid>2</ofdpa10:vid>
</ofdpa10:next-hop>
</of11-config:capable-switch>
</config>
"""
#of-agent vni 10 multicast 224.1.1.1 nexthop 20
config_vni_xml="""
<config>
<of11-config:capable-switch xmlns:of11-config="urn:onf:of111:config:yang">
<ofdpa10:vni xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:id>10</ofdpa10:id>
<ofdpa10:vni-multicast-group>224.1.1.1</ofdpa10:vni-multicast-group>
<ofdpa10:multicast-group-nexthop-id>20</ofdpa10:multicast-group-nexthop-id>
</ofdpa10:vni>
</of11-config:capable-switch>
</config>
"""
#of-agent vtap 10001 ethernet 1/1 vid 1
#of-agent vtp 10001 vni 10
config_vtap_xml="""
<config>
<capable-switch xmlns="urn:onf:of111:config:yang">
<id>capable-switch-1</id>
<resources>
<port>
<resource-id>10001</resource-id>
<features>
<current>
<rate>10Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</current>
<advertised>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised>
<supported>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</supported>
<advertised-peer>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised-peer>
</features>
<ofdpa10:vtap xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:phy-port>1</ofdpa10:phy-port>
<ofdpa10:vid>1</ofdpa10:vid>
<ofdpa10:vni>10</ofdpa10:vni>
</ofdpa10:vtap>
</port>
</resources>
<logical-switches>
<switch>
<id>user-input-switch-cpu-mac</id>
<datapath-id>user-input-switch-cpu-mac</datapath-id>
<resources>
<port>10001</port>
</resources>
</switch>
</logical-switches>
</capable-switch>
</config>
"""
#of-agent vtep 10002 source user-input-src-ip destination user-input-dst-ip udp-source-port 6633 nexthop 2 ttl 25
config_vtep_xml="""
<config>
<capable-switch xmlns="urn:onf:of111:config:yang">
<id>capable-switch-1</id>
<ofdpa10:udp-dest-port xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">6633</ofdpa10:udp-dest-port>
<resources>
<port>
<resource-id>10002</resource-id>
<features>
<current>
<rate>10Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</current>
<advertised>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised>
<supported>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</supported>
<advertised-peer>
<rate>10Gb</rate>
<rate>100Gb</rate>
<medium>fiber</medium>
<pause>symmetric</pause>
</advertised-peer>
</features>
<ofdpa10:vtep xmlns:ofdpa10="urn:bcm:ofdpa10:accton01">
<ofdpa10:src-ip>user-input-src-ip</ofdpa10:src-ip>
<ofdpa10:dest-ip>user-input-dst-ip</ofdpa10:dest-ip>
<ofdpa10:udp-src-port>6633</ofdpa10:udp-src-port>
<ofdpa10:vni>10</ofdpa10:vni>
<ofdpa10:nexthop-id>2</ofdpa10:nexthop-id>
<ofdpa10:ttl>25</ofdpa10:ttl>
</ofdpa10:vtep>
</port>
</resources>
<logical-switches>
<switch>
<id>user-input-switch-cpu-mac</id>
<datapath-id>user-input-switch-cpu-mac</datapath-id>
<resources>
<port>10002</port>
</resources>
</switch>
</logical-switches>
</capable-switch>
</config>
"""
def replace_vtep_vtap_nexthop(sip, dip, smac, dmac):
global nexthop_ucast_xml
nexthop_ucast_xml=config_nexthop_ucast_xml.replace("user-input-dst-mac", dmac)
global vtep_xml
vtep_xml=config_vtep_xml.replace("user-input-switch-cpu-mac", "00:00:"+smac)
vtep_xml=vtep_xml.replace("user-input-src-ip", sip)
vtep_xml=vtep_xml.replace("user-input-dst-ip", dip)
global vtap_xml
vtap_xml=config_vtap_xml.replace("user-input-switch-cpu-mac","00:00:"+smac)
def send_edit_config(host_ip, username, password):
with manager.connect_ssh(host=host_ip, port=830, username=username, password=password, hostkey_verify=False ) as m:
try:
m.edit_config(target='running',
config=nexthop_ucast_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config config_nexthop_ucast_xml"
return -1
try:
m.edit_config(target='running',
config=config_nexthop_mcast_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config config_nexthop_mcast_xml"
return -1
try:
m.edit_config(target='running',
config=config_vni_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config config_vni_xml"
return -1
try:
m.edit_config(target='running',
config=vtep_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config vtep_xml"
return -1
try:
m.edit_config(target='running',
config=vtap_xml,
default_operation='merge',
error_option='stop-on-error')
except Exception as e:
print "Fail to edit-config vtap_xml"
return -1
print m.get_config(source='running').data_xml
#replace_vtep_vtap_nexthop("10.1.1.1", "10.1.2.1", "70:72:cf:dc:9e:da", "70:72:cf:b5:ea:88")
#send_edit_config("192.168.1.1", "netconfuser", "netconfuser")
|
apache-2.0
|
381426068/ardupilot
|
Tools/autotest/param_metadata/htmlemit.py
|
148
|
2754
|
#!/usr/bin/env python
import re
from param import *
from emit import Emit
import cgi
# Emit docs in a form acceptable to the APM wordpress docs site
class HtmlEmit(Emit):
def __init__(self):
html_fname = 'Parameters.html'
self.f = open(html_fname, mode='w')
self.preamble = '''<!-- Dynamically generated list of documented parameters
This page was generated using Tools/autotest/param_metadata/param_parse.py
DO NOT EDIT
-->
<h3 style="text-align: center">Complete Parameter List</h3>
<hr />
<p>This is a complete list of the parameters which can be set via the MAVLink protocol in the EEPROM of your APM to control vehicle behaviour. This list is automatically generated from the latest ardupilot source code, and so may contain parameters which are not yet in the stable released versions of the code.</p>
<!-- add auto-generated table of contents with "Table of Contents Plus" plugin -->
[toc exclude="Complete Parameter List"]
'''
self.t = ''
def escape(self, s):
s = s.replace(' ', '-')
s = s.replace(':', '-')
s = s.replace('(', '')
s = s.replace(')', '')
return s
def close(self):
self.f.write(self.preamble)
self.f.write(self.t)
self.f.close()
def start_libraries(self):
pass
def emit(self, g, f):
tag = '%s Parameters' % g.name
t = '\n\n<h1>%s</h1>\n' % tag
for param in g.params:
if not hasattr(param, 'DisplayName') or not hasattr(param, 'Description'):
continue
d = param.__dict__
tag = '%s (%s)' % (param.DisplayName, param.name)
t += '\n\n<h2>%s</h2>' % tag
if d.get('User',None) == 'Advanced':
t += '<em>Note: This parameter is for advanced users</em><br>'
t += "\n\n<p>%s</p>\n" % cgi.escape(param.Description)
t += "<ul>\n"
for field in param.__dict__.keys():
if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
values = (param.__dict__[field]).split(',')
t += "<table><th>Value</th><th>Meaning</th>\n"
for value in values:
v = value.split(':')
t += "<tr><td>%s</td><td>%s</td></tr>\n" % (v[0], v[1])
t += "</table>\n"
else:
t += "<li>%s: %s</li>\n" % (field, cgi.escape(param.__dict__[field]))
t += "</ul>\n"
self.t += t
|
gpl-3.0
|
BT-fgarbely/odoo
|
addons/account_budget/report/crossovered_budget_report.py
|
315
|
8657
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class budget_report(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(budget_report, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'funct': self.funct,
'funct_total': self.funct_total,
'time': time,
})
self.context = context
def funct(self, object, form, ids=None, done=None, level=1):
if ids is None:
ids = {}
if not ids:
ids = self.ids
if not done:
done = {}
global tot
tot = {
'theo':0.00,
'pln':0.00,
'prac':0.00,
'perc':0.00
}
result = []
budgets = self.pool.get('crossovered.budget').browse(self.cr, self.uid, [object.id], self.context.copy())
c_b_lines_obj = self.pool.get('crossovered.budget.lines')
acc_analytic_obj = self.pool.get('account.analytic.account')
for budget_id in budgets:
res = {}
budget_lines = []
budget_ids = []
d_from = form['date_from']
d_to = form['date_to']
for line in budget_id.crossovered_budget_line:
budget_ids.append(line.id)
if not budget_ids:
return []
self.cr.execute('SELECT DISTINCT(analytic_account_id) FROM crossovered_budget_lines WHERE id = ANY(%s)',(budget_ids,))
an_ids = self.cr.fetchall()
context = {'wizard_date_from': d_from, 'wizard_date_to': d_to}
for i in range(0, len(an_ids)):
if not an_ids[i][0]:
continue
analytic_name = acc_analytic_obj.browse(self.cr, self.uid, [an_ids[i][0]])
res={
'b_id': '-1',
'a_id': '-1',
'name': analytic_name[0].name,
'status': 1,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
result.append(res)
line_ids = c_b_lines_obj.search(self.cr, self.uid, [('id', 'in', budget_ids), ('analytic_account_id','=',an_ids[i][0]), ('date_to', '>=', d_from), ('date_from', '<=', d_to)])
line_id = c_b_lines_obj.browse(self.cr, self.uid, line_ids)
tot_theo = tot_pln = tot_prac = tot_perc = 0.00
done_budget = []
for line in line_id:
if line.id in budget_ids:
theo = pract = 0.00
theo = c_b_lines_obj._theo_amt(self.cr, self.uid, [line.id], context)[line.id]
pract = c_b_lines_obj._prac_amt(self.cr, self.uid, [line.id], context)[line.id]
if line.general_budget_id.id in done_budget:
for record in result:
if record['b_id'] == line.general_budget_id.id and record['a_id'] == line.analytic_account_id.id:
record['theo'] += theo
record['pln'] += line.planned_amount
record['prac'] += pract
if record['theo'] <> 0.00:
perc = (record['prac'] / record['theo']) * 100
else:
perc = 0.00
record['perc'] = perc
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += perc
else:
if theo <> 0.00:
perc = (pract / theo) * 100
else:
perc = 0.00
res1 = {
'a_id': line.analytic_account_id.id,
'b_id': line.general_budget_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': theo,
'pln': line.planned_amount,
'prac': pract,
'perc': perc,
}
tot_theo += theo
tot_pln += line.planned_amount
tot_prac += pract
tot_perc += perc
if form['report'] == 'analytic-full':
result.append(res1)
done_budget.append(line.general_budget_id.id)
else:
if line.general_budget_id.id in done_budget:
continue
else:
res1={
'a_id': line.analytic_account_id.id,
'b_id': line.general_budget_id.id,
'name': line.general_budget_id.name,
'status': 2,
'theo': 0.00,
'pln': 0.00,
'prac': 0.00,
'perc': 0.00
}
if form['report'] == 'analytic-full':
result.append(res1)
done_budget.append(line.general_budget_id.id)
if tot_theo == 0.00:
tot_perc = 0.00
else:
tot_perc = float(tot_prac / tot_theo) * 100
if form['report'] == 'analytic-full':
result[-(len(done_budget) +1)]['theo'] = tot_theo
tot['theo'] += tot_theo
result[-(len(done_budget) +1)]['pln'] = tot_pln
tot['pln'] += tot_pln
result[-(len(done_budget) +1)]['prac'] = tot_prac
tot['prac'] += tot_prac
result[-(len(done_budget) +1)]['perc'] = tot_perc
else:
result[-1]['theo'] = tot_theo
tot['theo'] += tot_theo
result[-1]['pln'] = tot_pln
tot['pln'] += tot_pln
result[-1]['prac'] = tot_prac
tot['prac'] += tot_prac
result[-1]['perc'] = tot_perc
if tot['theo'] == 0.00:
tot['perc'] = 0.00
else:
tot['perc'] = float(tot['prac'] / tot['theo']) * 100
return result
def funct_total(self, form):
result = []
res = {}
res = {
'tot_theo': tot['theo'],
'tot_pln': tot['pln'],
'tot_prac': tot['prac'],
'tot_perc': tot['perc']
}
result.append(res)
return result
class report_crossoveredbudget(osv.AbstractModel):
_name = 'report.account_budget.report_crossoveredbudget'
_inherit = 'report.abstract_report'
_template = 'account_budget.report_crossoveredbudget'
_wrapped_report_class = budget_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
chadnetzer/numpy-gaurdro
|
numpy/linalg/setup.py
|
6
|
1339
|
import sys
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('linalg',parent_package,top_path)
config.add_data_dir('tests')
# Configure lapack_lite
lapack_info = get_info('lapack_opt',0) # and {}
def get_lapack_lite_sources(ext, build_dir):
if not lapack_info:
print "### Warning: Using unoptimized lapack ###"
return ext.depends[:-1]
else:
if sys.platform=='win32':
print "### Warning: python_xerbla.c is disabled ###"
return ext.depends[:1]
return ext.depends[:2]
config.add_extension('lapack_lite',
sources = [get_lapack_lite_sources],
depends= ['lapack_litemodule.c',
'python_xerbla.c',
'zlapack_lite.c', 'dlapack_lite.c',
'blas_lite.c', 'dlamch.c',
'f2c_lite.c','f2c.h'],
extra_info = lapack_info
)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
bsd-3-clause
|
nugget/home-assistant
|
homeassistant/components/apcupsd/sensor.py
|
2
|
7313
|
"""Support for APCUPSd sensors."""
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
import homeassistant.helpers.config_validation as cv
from homeassistant.components import apcupsd
from homeassistant.const import (TEMP_CELSIUS, CONF_RESOURCES, POWER_WATT)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = [apcupsd.DOMAIN]
SENSOR_PREFIX = 'UPS '
SENSOR_TYPES = {
'alarmdel': ['Alarm Delay', '', 'mdi:alarm'],
'ambtemp': ['Ambient Temperature', '', 'mdi:thermometer'],
'apc': ['Status Data', '', 'mdi:information-outline'],
'apcmodel': ['Model', '', 'mdi:information-outline'],
'badbatts': ['Bad Batteries', '', 'mdi:information-outline'],
'battdate': ['Battery Replaced', '', 'mdi:calendar-clock'],
'battstat': ['Battery Status', '', 'mdi:information-outline'],
'battv': ['Battery Voltage', 'V', 'mdi:flash'],
'bcharge': ['Battery', '%', 'mdi:battery'],
'cable': ['Cable Type', '', 'mdi:ethernet-cable'],
'cumonbatt': ['Total Time on Battery', '', 'mdi:timer'],
'date': ['Status Date', '', 'mdi:calendar-clock'],
'dipsw': ['Dip Switch Settings', '', 'mdi:information-outline'],
'dlowbatt': ['Low Battery Signal', '', 'mdi:clock-alert'],
'driver': ['Driver', '', 'mdi:information-outline'],
'dshutd': ['Shutdown Delay', '', 'mdi:timer'],
'dwake': ['Wake Delay', '', 'mdi:timer'],
'endapc': ['Date and Time', '', 'mdi:calendar-clock'],
'extbatts': ['External Batteries', '', 'mdi:information-outline'],
'firmware': ['Firmware Version', '', 'mdi:information-outline'],
'hitrans': ['Transfer High', 'V', 'mdi:flash'],
'hostname': ['Hostname', '', 'mdi:information-outline'],
'humidity': ['Ambient Humidity', '%', 'mdi:water-percent'],
'itemp': ['Internal Temperature', TEMP_CELSIUS, 'mdi:thermometer'],
'lastxfer': ['Last Transfer', '', 'mdi:transfer'],
'linefail': ['Input Voltage Status', '', 'mdi:information-outline'],
'linefreq': ['Line Frequency', 'Hz', 'mdi:information-outline'],
'linev': ['Input Voltage', 'V', 'mdi:flash'],
'loadpct': ['Load', '%', 'mdi:gauge'],
'loadapnt': ['Load Apparent Power', '%', 'mdi:gauge'],
'lotrans': ['Transfer Low', 'V', 'mdi:flash'],
'mandate': ['Manufacture Date', '', 'mdi:calendar'],
'masterupd': ['Master Update', '', 'mdi:information-outline'],
'maxlinev': ['Input Voltage High', 'V', 'mdi:flash'],
'maxtime': ['Battery Timeout', '', 'mdi:timer-off'],
'mbattchg': ['Battery Shutdown', '%', 'mdi:battery-alert'],
'minlinev': ['Input Voltage Low', 'V', 'mdi:flash'],
'mintimel': ['Shutdown Time', '', 'mdi:timer'],
'model': ['Model', '', 'mdi:information-outline'],
'nombattv': ['Battery Nominal Voltage', 'V', 'mdi:flash'],
'nominv': ['Nominal Input Voltage', 'V', 'mdi:flash'],
'nomoutv': ['Nominal Output Voltage', 'V', 'mdi:flash'],
'nompower': ['Nominal Output Power', POWER_WATT, 'mdi:flash'],
'nomapnt': ['Nominal Apparent Power', 'VA', 'mdi:flash'],
'numxfers': ['Transfer Count', '', 'mdi:counter'],
'outcurnt': ['Output Current', 'A', 'mdi:flash'],
'outputv': ['Output Voltage', 'V', 'mdi:flash'],
'reg1': ['Register 1 Fault', '', 'mdi:information-outline'],
'reg2': ['Register 2 Fault', '', 'mdi:information-outline'],
'reg3': ['Register 3 Fault', '', 'mdi:information-outline'],
'retpct': ['Restore Requirement', '%', 'mdi:battery-alert'],
'selftest': ['Last Self Test', '', 'mdi:calendar-clock'],
'sense': ['Sensitivity', '', 'mdi:information-outline'],
'serialno': ['Serial Number', '', 'mdi:information-outline'],
'starttime': ['Startup Time', '', 'mdi:calendar-clock'],
'statflag': ['Status Flag', '', 'mdi:information-outline'],
'status': ['Status', '', 'mdi:information-outline'],
'stesti': ['Self Test Interval', '', 'mdi:information-outline'],
'timeleft': ['Time Left', '', 'mdi:clock-alert'],
'tonbatt': ['Time on Battery', '', 'mdi:timer'],
'upsmode': ['Mode', '', 'mdi:information-outline'],
'upsname': ['Name', '', 'mdi:information-outline'],
'version': ['Daemon Info', '', 'mdi:information-outline'],
'xoffbat': ['Transfer from Battery', '', 'mdi:transfer'],
'xoffbatt': ['Transfer from Battery', '', 'mdi:transfer'],
'xonbatt': ['Transfer to Battery', '', 'mdi:transfer'],
}
SPECIFIC_UNITS = {
'ITEMP': TEMP_CELSIUS
}
INFERRED_UNITS = {
' Minutes': 'min',
' Seconds': 'sec',
' Percent': '%',
' Volts': 'V',
' Ampere': 'A',
' Volt-Ampere': 'VA',
' Watts': POWER_WATT,
' Hz': 'Hz',
' C': TEMP_CELSIUS,
' Percent Load Capacity': '%',
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_RESOURCES, default=[]):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the APCUPSd sensors."""
entities = []
for resource in config[CONF_RESOURCES]:
sensor_type = resource.lower()
if sensor_type not in SENSOR_TYPES:
SENSOR_TYPES[sensor_type] = [
sensor_type.title(), '', 'mdi:information-outline']
if sensor_type.upper() not in apcupsd.DATA.status:
_LOGGER.warning(
"Sensor type: %s does not appear in the APCUPSd status output",
sensor_type)
entities.append(APCUPSdSensor(apcupsd.DATA, sensor_type))
add_entities(entities, True)
def infer_unit(value):
"""If the value ends with any of the units from ALL_UNITS.
Split the unit off the end of the value and return the value, unit tuple
pair. Else return the original value and None as the unit.
"""
from apcaccess.status import ALL_UNITS
for unit in ALL_UNITS:
if value.endswith(unit):
return value[:-len(unit)], INFERRED_UNITS.get(unit, unit.strip())
return value, None
class APCUPSdSensor(Entity):
"""Representation of a sensor entity for APCUPSd status values."""
def __init__(self, data, sensor_type):
"""Initialize the sensor."""
self._data = data
self.type = sensor_type
self._name = SENSOR_PREFIX + SENSOR_TYPES[sensor_type][0]
self._unit = SENSOR_TYPES[sensor_type][1]
self._inferred_unit = None
self._state = None
@property
def name(self):
"""Return the name of the UPS sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return true if the UPS is online, else False."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
if not self._unit:
return self._inferred_unit
return self._unit
def update(self):
"""Get the latest status and use it to update our sensor state."""
if self.type.upper() not in self._data.status:
self._state = None
self._inferred_unit = None
else:
self._state, self._inferred_unit = infer_unit(
self._data.status[self.type.upper()])
|
apache-2.0
|
all-of-us/raw-data-repository
|
rdr_service/alembic/versions/72365b7c0037_add_gender_identity_enums.py
|
1
|
1232
|
"""add gender identity enums
Revision ID: 72365b7c0037
Revises: 9c957ce496bf
Create Date: 2019-06-05 08:56:34.278852
"""
import model.utils
import sqlalchemy as sa
from alembic import op
from rdr_service.participant_enums import GenderIdentity
# revision identifiers, used by Alembic.
revision = "72365b7c0037"
down_revision = "9c957ce496bf"
branch_labels = None
depends_on = None
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("participant_summary", sa.Column("gender_identity", model.utils.Enum(GenderIdentity), nullable=True))
# ### end Alembic commands ###
def downgrade_rdr():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("participant_summary", "gender_identity")
# ### end Alembic commands ###
def upgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade_metrics():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
bsd-3-clause
|
guozhangwang/kafka
|
tests/kafkatest/services/trogdor/network_partition_fault_spec.py
|
17
|
1816
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kafkatest.services.trogdor.task_spec import TaskSpec
class NetworkPartitionFaultSpec(TaskSpec):
"""
The specification for a network partition fault.
Network partition faults fracture the network into different partitions
that cannot communicate with each other.
"""
def __init__(self, start_ms, duration_ms, partitions):
"""
Create a new NetworkPartitionFaultSpec.
:param start_ms: The start time, as described in task_spec.py
:param duration_ms: The duration in milliseconds.
:param partitions: An array of arrays describing the partitions.
The inner arrays may contain either node names,
or ClusterNode objects.
"""
super(NetworkPartitionFaultSpec, self).__init__(start_ms, duration_ms)
self.message["class"] = "org.apache.kafka.trogdor.fault.NetworkPartitionFaultSpec"
self.message["partitions"] = [TaskSpec.to_node_names(p) for p in partitions]
|
apache-2.0
|
gunny26/datalogger
|
datalogger/Test_DataLogger.py
|
1
|
4699
|
#!/usr/bin/python2
from __future__ import print_function
import unittest
import logging
logging.basicConfig(level=logging.INFO)
import datetime
import gzip
import json
import os
# own modules
from DataLogger import DataLogger as DataLogger
from Timeseries import Timeseries as Timeseries
from TimeseriesArray import TimeseriesArray as TimeseriesArray
from TimeseriesStats import TimeseriesStats as TimeseriesStats
from TimeseriesArrayStats import TimeseriesArrayStats as TimeseriesArrayStats
from Quantile import QuantileArray as QuantileArray
from Quantile import Quantile as Quantile
class Test(unittest.TestCase):
def setUp(self):
self.basedir = "testdata"
self.project = "mysql"
self.tablename = "performance"
self.datestring = "2018-04-01"
self.datalogger = DataLogger(self.basedir)
def notest__str__(self):
print(self.datalogger)
def test__init__(self):
try:
DataLogger("/nonexisting")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
try:
dl = DataLogger("testdata")
dl.setup("unknownproject", self.tablename, "2018-04-01")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
try:
DataLogger("testdata")
dl.setup("sanportperf", "unknowntablename", "2018-04-01")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
try:
DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
except AttributeError as exc:
print("Expected Exception: %s" % exc)
def test__getitem__(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
caches = dl["caches"]
print(caches)
assert isinstance(caches, dict)
tsa = dl["tsa"]
print(tsa)
assert isinstance(tsa, TimeseriesArray)
ts = dl["tsa", ("nagios.tilak.cc",)]
print(ts)
assert isinstance(ts, Timeseries)
assert tsa[("nagios.tilak.cc",)] == ts
tsastats = dl["tsastats"]
print(tsastats)
assert isinstance(tsastats, TimeseriesArrayStats)
tsstats = dl["tsastats", ("nagios.tilak.cc",)]
print(tsstats)
assert isinstance(tsstats, TimeseriesStats)
assert tsastats[("nagios.tilak.cc",)] == tsstats
qa = dl["qa"]
print(qa)
assert isinstance(qa, QuantileArray)
quantile = dl["qa", ("nagios.tilak.cc",)]
print(quantile)
assert isinstance(quantile, dict)
assert qa[("nagios.tilak.cc",)] == quantile
def test_load_tsa(self):
dl = DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
dl.delete_caches()
tsa = dl.load_tsa()
#print(tsa)
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
tsa = dl.load_tsa()
#print(tsa)
def test_load_tsastats(self):
dl = DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
dl.delete_caches()
tsastats = dl.load_tsastats()
#print(tsa)
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
tsastats = dl.load_tsastats()
#print(tsa)
def test_load_quantiles(self):
dl = DataLogger("testdata")
dl.setup("sanportperf", "fcIfC3AccountingTable", "2018-04-01")
dl.delete_caches()
quantiles = dl.load_quantile()
#print(tsa)
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
quantiles = dl.load_quantile()
#print(tsa)
def test_load_caches(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
print(dl.get_caches())
tsa = dl.load_tsa()
print(dl.get_caches())
def test_total_stats(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
dl.delete_caches()
total_stats = dl.load_total_stats()
print(json.dumps(total_stats, indent=4))
def test_raw_reader(self):
dl = DataLogger("testdata")
dl.setup("mysql", "performance", "2018-04-01")
for row in dl.raw_reader():
pass
assert row['bytes_received'] == '272517939'
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
unittest.main()
|
apache-2.0
|
thaskell1/volatility
|
volatility/plugins/addrspaces/intel.py
|
8
|
11081
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (C) 2004,2005,2006 4tphi Research
#
# Authors:
# {npetroni,awalters}@4tphi.net (Nick Petroni and AAron Walters)
# Michael Cohen <scudette@users.sourceforge.net>
# Mike Auty <mike.auty@gmail.com>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import struct
import volatility.plugins.addrspaces.paged as paged
import volatility.obj as obj
entry_size = 8
pointer_size = 4
page_shift = 12
ptrs_per_pte = 1024
ptrs_per_pgd = 1024
ptrs_per_pae_pte = 512
ptrs_per_pae_pgd = 512
ptrs_per_pdpi = 4
pgdir_shift = 22
pdpi_shift = 30
pdptb_shift = 5
pde_shift = 21
ptrs_per_pde = 512
ptrs_page = 2048
class IA32PagedMemory(paged.AbstractWritablePagedMemory):
""" Standard IA-32 paging address space.
This class implements the IA-32 paging address space. It is responsible
for translating each virtual (linear) address to a physical address.
This is accomplished using hierachical paging structures.
Every paging structure is 4096 bytes and is composed of entries.
Each entry is 32 bits. The first paging structure is located at the
physical address found in CR3 (dtb).
Additional Resources:
- Intel(R) 64 and IA-32 Architectures Software Developer's Manual
Volume 3A: System Programming Guide. Section 4.3
http://www.intel.com/products/processor/manuals/index.htm
- AMD64 Architecture Programmer's Manual Volume 2: System Programming
http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf
- N. Petroni, A. Walters, T. Fraser, and W. Arbaugh, "FATKit: A Framework
for the Extraction and Analysis of Digital Forensic Data from Volatile
System Memory" ,Digital Investigation Journal 3(4):197-210, December 2006.
(submitted February 2006)
- N. P. Maclean, "Acquisition and Analysis of Windows Memory,"
University of Strathclyde, Glasgow, April 2006.
- Russinovich, M., & Solomon, D., & Ionescu, A.
"Windows Internals, 5th Edition", Microsoft Press, 2009.
"""
order = 70
pae = False
paging_address_space = True
checkname = 'IA32ValidAS'
# Hardcoded page info to avoid expensive recalculation
minimum_size = 0x1000
alignment_gcd = 0x1000
_long_struct = struct.Struct('<I')
def __init__(self, base, config, dtb = 0, skip_as_check = False, *args, **kwargs):
## We must be stacked on someone else:
self.as_assert(base, "No base Address Space")
paged.AbstractWritablePagedMemory.__init__(self, base, config, dtb = dtb, skip_as_check = skip_as_check, *args, **kwargs)
def is_valid_profile(self, profile):
return profile.metadata.get('memory_model', '32bit') == '32bit' or profile.metadata.get('os', 'Unknown').lower() == 'mac'
def entry_present(self, entry):
if entry:
if (entry & 1):
return True
arch = self.profile.metadata.get('os', 'Unknown').lower()
# The page is in transition and not a prototype.
# Thus, we will treat it as present.
if arch == "windows" and ((entry & (1 << 11)) and not (entry & (1 << 10))):
return True
# Linux pages that have had mprotect(...PROT_NONE) called on them
# have the present bit cleared and global bit set
if arch == "linux" and (entry & (1 << 8)):
return True
return False
def page_size_flag(self, entry):
if (entry & (1 << 7)) == (1 << 7):
return True
return False
def pgd_index(self, pgd):
return (pgd >> pgdir_shift) & (ptrs_per_pgd - 1)
def get_pgd(self, vaddr):
pgd_entry = self.dtb + self.pgd_index(vaddr) * pointer_size
return self.read_long_phys(pgd_entry)
def pte_pfn(self, pte):
return pte >> page_shift
def pte_index(self, pte):
return (pte >> page_shift) & (ptrs_per_pte - 1)
def get_pte(self, vaddr, pgd):
pgd_val = pgd & ~((1 << page_shift) - 1)
pgd_val = pgd_val + self.pte_index(vaddr) * pointer_size
return self.read_long_phys(pgd_val)
def get_paddr(self, vaddr, pte):
return (self.pte_pfn(pte) << page_shift) | (vaddr & ((1 << page_shift) - 1))
def get_four_meg_paddr(self, vaddr, pgd_entry):
return (pgd_entry & ((ptrs_per_pgd - 1) << 22)) | (vaddr & ~((ptrs_per_pgd - 1) << 22))
def vtop(self, vaddr):
retVal = None
pgd = self.get_pgd(vaddr)
if self.entry_present(pgd):
if self.page_size_flag(pgd):
retVal = self.get_four_meg_paddr(vaddr, pgd)
else:
pte = self.get_pte(vaddr, pgd)
if not pte:
return None
if self.entry_present(pte):
retVal = self.get_paddr(vaddr, pte)
return retVal
def read_long_phys(self, addr):
try:
string = self.base.read(addr, 4)
except IOError:
string = None
if not string:
return obj.NoneObject("Unable to read_long_phys at " + hex(addr))
longval, = self._long_struct.unpack(string)
return longval
def get_available_pages(self):
pgd_curr = self.dtb
for i in range(0, ptrs_per_pgd):
start = (i * ptrs_per_pgd * ptrs_per_pte * 4)
entry = self.read_long_phys(pgd_curr)
pgd_curr = pgd_curr + 4
if self.entry_present(entry) and self.page_size_flag(entry):
yield (start, 0x400000)
elif self.entry_present(entry):
pte_curr = entry & ~((1 << page_shift) - 1)
for j in range(0, ptrs_per_pte):
pte_entry = self.read_long_phys(pte_curr)
pte_curr = pte_curr + 4
if self.entry_present(pte_entry):
yield (start + j * 0x1000, 0x1000)
class IA32PagedMemoryPae(IA32PagedMemory):
"""
This class implements the IA-32 PAE paging address space. It is responsible
for translating each 32-bit virtual (linear) address to a 52-bit physical address.
When PAE paging is in use, CR3 references the base of a 32-Byte Page Directory
Pointer Table.
Additional Resources:
- Intel(R) 64 and IA-32 Architectures Software Developer's Manual
Volume 3A: System Programming Guide. Section 4.3
http://www.intel.com/products/processor/manuals/index.htm
- N. Petroni, A. Walters, T. Fraser, and W. Arbaugh, "FATKit: A Framework
for the Extraction and Analysis of Digital Forensic Data from Volatile
System Memory" ,Digital Investigation Journal 3(4):197-210, December 2006.
(submitted February 2006)
- N. P. Maclean, "Acquisition and Analysis of Windows Memory,"
University of Strathclyde, Glasgow, April 2006.
- Russinovich, M., & Solomon, D., & Ionescu, A.
"Windows Internals, 5th Edition", Microsoft Press, 2009.
"""
order = 60
pae = True
_longlong_struct = struct.Struct('<Q')
def get_pdptb(self, pdpr):
return pdpr & 0xFFFFFFE0
def pdpi_index(self, pdpi):
return (pdpi >> pdpi_shift)
def get_pdpi(self, vaddr):
pdpi_entry = self.get_pdptb(self.dtb) + self.pdpi_index(vaddr) * entry_size
return self._read_long_long_phys(pdpi_entry)
def pde_index(self, vaddr):
return (vaddr >> pde_shift) & (ptrs_per_pde - 1)
def pdba_base(self, pdpe):
return pdpe & 0xFFFFFFFFFF000
def get_pgd(self, vaddr, pdpe):
pgd_entry = self.pdba_base(pdpe) + self.pde_index(vaddr) * entry_size
return self._read_long_long_phys(pgd_entry)
def pte_pfn(self, pte):
return pte & 0xFFFFFFFFFF000
def pte_index(self, vaddr):
return (vaddr >> page_shift) & (ptrs_per_pde - 1)
def ptba_base(self, pde):
return pde & 0xFFFFFFFFFF000
def get_pte(self, vaddr, pgd):
pgd_val = self.ptba_base(pgd) + self.pte_index(vaddr) * entry_size
return self._read_long_long_phys(pgd_val)
def get_paddr(self, vaddr, pte):
return self.pte_pfn(pte) | (vaddr & ((1 << page_shift) - 1))
def get_large_paddr(self, vaddr, pgd_entry):
return (pgd_entry & 0xFFFFFFFE00000) | (vaddr & ~((ptrs_page - 1) << 21))
def vtop(self, vaddr):
retVal = None
pdpe = self.get_pdpi(vaddr)
if not self.entry_present(pdpe):
return retVal
pgd = self.get_pgd(vaddr, pdpe)
if self.entry_present(pgd):
if self.page_size_flag(pgd):
retVal = self.get_large_paddr(vaddr, pgd)
else:
pte = self.get_pte(vaddr, pgd)
if self.entry_present(pte):
retVal = self.get_paddr(vaddr, pte)
return retVal
def _read_long_long_phys(self, addr):
if not addr:
return obj.NoneObject("Unable to read None")
try:
string = self.base.read(addr, 8)
except IOError:
string = None
if not string:
return obj.NoneObject("Unable to read base AS at " + hex(addr))
longlongval, = self._longlong_struct.unpack(string)
return longlongval
def get_available_pages(self):
pdpi_base = self.get_pdptb(self.dtb)
for i in range(0, ptrs_per_pdpi):
start = (i * ptrs_per_pae_pgd * ptrs_per_pae_pgd * ptrs_per_pae_pte * 8)
pdpi_entry = pdpi_base + i * entry_size
pdpe = self._read_long_long_phys(pdpi_entry)
if not self.entry_present(pdpe):
continue
pgd_curr = self.pdba_base(pdpe)
for j in range(0, ptrs_per_pae_pgd):
soffset = start + (j * ptrs_per_pae_pgd * ptrs_per_pae_pte * 8)
entry = self._read_long_long_phys(pgd_curr)
pgd_curr = pgd_curr + 8
if self.entry_present(entry) and self.page_size_flag(entry):
yield (soffset, 0x200000)
elif self.entry_present(entry):
pte_curr = entry & ~((1 << page_shift) - 1)
for k in range(0, ptrs_per_pae_pte):
pte_entry = self._read_long_long_phys(pte_curr)
pte_curr = pte_curr + 8
if self.entry_present(pte_entry):
yield (soffset + k * 0x1000, 0x1000)
|
gpl-2.0
|
RossBrunton/django
|
tests/admin_ordering/tests.py
|
279
|
6702
|
from __future__ import unicode_literals
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.contrib.auth.models import User
from django.test import RequestFactory, TestCase
from .models import (
Band, DynOrderingBandAdmin, Song, SongInlineDefaultOrdering,
SongInlineNewOrdering,
)
class MockRequest(object):
pass
class MockSuperUser(object):
def has_perm(self, perm):
return True
def has_module_perms(self, module):
return True
request = MockRequest()
request.user = MockSuperUser()
site = admin.AdminSite()
class TestAdminOrdering(TestCase):
"""
Let's make sure that ModelAdmin.get_queryset uses the ordering we define
in ModelAdmin rather that ordering defined in the model's inner Meta
class.
"""
def setUp(self):
self.request_factory = RequestFactory()
Band.objects.bulk_create([
Band(name='Aerosmith', bio='', rank=3),
Band(name='Radiohead', bio='', rank=1),
Band(name='Van Halen', bio='', rank=2),
])
def test_default_ordering(self):
"""
The default ordering should be by name, as specified in the inner Meta
class.
"""
ma = ModelAdmin(Band, site)
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
def test_specified_ordering(self):
"""
Let's use a custom ModelAdmin that changes the ordering, and make sure
it actually changes.
"""
class BandAdmin(ModelAdmin):
ordering = ('rank',) # default ordering is ('name',)
ma = BandAdmin(Band, site)
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
def test_dynamic_ordering(self):
"""
Let's use a custom ModelAdmin that changes the ordering dynamically.
"""
super_user = User.objects.create(username='admin', is_superuser=True)
other_user = User.objects.create(username='other')
request = self.request_factory.get('/')
request.user = super_user
ma = DynOrderingBandAdmin(Band, site)
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Radiohead', 'Van Halen', 'Aerosmith'], names)
request.user = other_user
names = [b.name for b in ma.get_queryset(request)]
self.assertListEqual(['Aerosmith', 'Radiohead', 'Van Halen'], names)
class TestInlineModelAdminOrdering(TestCase):
"""
Let's make sure that InlineModelAdmin.get_queryset uses the ordering we
define in InlineModelAdmin.
"""
def setUp(self):
self.band = Band.objects.create(name='Aerosmith', bio='', rank=3)
Song.objects.bulk_create([
Song(band=self.band, name='Pink', duration=235),
Song(band=self.band, name='Dude (Looks Like a Lady)', duration=264),
Song(band=self.band, name='Jaded', duration=214),
])
def test_default_ordering(self):
"""
The default ordering should be by name, as specified in the inner Meta
class.
"""
inline = SongInlineDefaultOrdering(self.band, site)
names = [s.name for s in inline.get_queryset(request)]
self.assertListEqual(['Dude (Looks Like a Lady)', 'Jaded', 'Pink'], names)
def test_specified_ordering(self):
"""
Let's check with ordering set to something different than the default.
"""
inline = SongInlineNewOrdering(self.band, site)
names = [s.name for s in inline.get_queryset(request)]
self.assertListEqual(['Jaded', 'Pink', 'Dude (Looks Like a Lady)'], names)
class TestRelatedFieldsAdminOrdering(TestCase):
def setUp(self):
self.b1 = Band.objects.create(name='Pink Floyd', bio='', rank=1)
self.b2 = Band.objects.create(name='Foo Fighters', bio='', rank=5)
# we need to register a custom ModelAdmin (instead of just using
# ModelAdmin) because the field creator tries to find the ModelAdmin
# for the related model
class SongAdmin(admin.ModelAdmin):
pass
site.register(Song, SongAdmin)
def tearDown(self):
site.unregister(Song)
if Band in site._registry:
site.unregister(Band)
def check_ordering_of_field_choices(self, correct_ordering):
fk_field = site._registry[Song].formfield_for_foreignkey(Song.band.field)
m2m_field = site._registry[Song].formfield_for_manytomany(Song.other_interpreters.field)
self.assertListEqual(list(fk_field.queryset), correct_ordering)
self.assertListEqual(list(m2m_field.queryset), correct_ordering)
def test_no_admin_fallback_to_model_ordering(self):
# should be ordered by name (as defined by the model)
self.check_ordering_of_field_choices([self.b2, self.b1])
def test_admin_with_no_ordering_fallback_to_model_ordering(self):
class NoOrderingBandAdmin(admin.ModelAdmin):
pass
site.register(Band, NoOrderingBandAdmin)
# should be ordered by name (as defined by the model)
self.check_ordering_of_field_choices([self.b2, self.b1])
def test_admin_ordering_beats_model_ordering(self):
class StaticOrderingBandAdmin(admin.ModelAdmin):
ordering = ('rank',)
site.register(Band, StaticOrderingBandAdmin)
# should be ordered by rank (defined by the ModelAdmin)
self.check_ordering_of_field_choices([self.b1, self.b2])
def test_custom_queryset_still_wins(self):
"""Test that custom queryset has still precedence (#21405)"""
class SongAdmin(admin.ModelAdmin):
# Exclude one of the two Bands from the querysets
def formfield_for_foreignkey(self, db_field, **kwargs):
if db_field.name == 'band':
kwargs["queryset"] = Band.objects.filter(rank__gt=2)
return super(SongAdmin, self).formfield_for_foreignkey(db_field, **kwargs)
def formfield_for_manytomany(self, db_field, **kwargs):
if db_field.name == 'other_interpreters':
kwargs["queryset"] = Band.objects.filter(rank__gt=2)
return super(SongAdmin, self).formfield_for_foreignkey(db_field, **kwargs)
class StaticOrderingBandAdmin(admin.ModelAdmin):
ordering = ('rank',)
site.unregister(Song)
site.register(Song, SongAdmin)
site.register(Band, StaticOrderingBandAdmin)
self.check_ordering_of_field_choices([self.b2])
|
bsd-3-clause
|
MetrodataTeam/incubator-airflow
|
scripts/perf/dags/perf_dag_1.py
|
44
|
1350
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import airflow
from airflow.operators.bash_operator import BashOperator
from airflow.models import DAG
from datetime import timedelta
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(3),
}
dag = DAG(
dag_id='perf_dag_1', default_args=args,
schedule_interval='@daily',
dagrun_timeout=timedelta(minutes=60))
task_1 = BashOperator(
task_id='perf_task_1',
bash_command='sleep 5; echo "run_id={{ run_id }} | dag_run={{ dag_run }}"',
dag=dag)
for i in range(2, 5):
task = BashOperator(
task_id='perf_task_{}'.format(i),
bash_command='''
sleep 5; echo "run_id={{ run_id }} | dag_run={{ dag_run }}"
''',
dag=dag)
task.set_upstream(task_1)
if __name__ == "__main__":
dag.cli()
|
apache-2.0
|
igemsoftware/SYSU-Software2013
|
project/Python27/Lib/cookielib.py
|
80
|
64452
|
"""HTTP cookie handling for web clients.
This module has (now fairly distant) origins in Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
Class diagram (note that BSDDBCookieJar and the MSIE* classes are not
distributed with the Python standard library, but are available from
http://wwwsearch.sf.net/):
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
"""
__all__ = ['Cookie', 'CookieJar', 'CookiePolicy', 'DefaultCookiePolicy',
'FileCookieJar', 'LWPCookieJar', 'lwp_cookie_str', 'LoadError',
'MozillaCookieJar']
import re, urlparse, copy, time, urllib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
import httplib # only for the default HTTP port
from calendar import timegm
debug = False # set to True to enable debugging via the logging module
logger = None
def _debug(*args):
if not debug:
return
global logger
if not logger:
import logging
logger = logging.getLogger("cookielib")
return logger.debug(*args)
DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
def _warn_unhandled_exception():
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways. Warn if any
# exceptions are caught there.
import warnings, traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("cookielib bug!\n%s" % msg, stacklevel=2)
# Date/time conversion
# -----------------------------------------------------------------------------
EPOCH_YEAR = 1970
def _timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH_YEAR) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
DAYS = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHS = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
MONTHS_LOWER = []
for month in MONTHS: MONTHS_LOWER.append(month.lower())
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
year, mon, mday, hour, min, sec)
def time2netscape(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
DAYS[wday], mday, MONTHS[mon-1], year, hour, min, sec)
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
TIMEZONE_RE = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
offset = None
if tz in UTC_ZONES:
offset = 0
else:
m = TIMEZONE_RE.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = MONTHS_LOWER.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = _timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
STRICT_DATE_RE = re.compile(
r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
WEEKDAY_RE = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
LOOSE_HTTP_DATE_RE = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = STRICT_DATE_RE.search(text)
if m:
g = m.groups()
mon = MONTHS_LOWER.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return _timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = WEEKDAY_RE.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = LOOSE_HTTP_DATE_RE.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
ISO_DATE_RE = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = ISO_DATE_RE.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
# Header parsing
# -----------------------------------------------------------------------------
def unmatched(match):
"""Return unmatched part of re.Match object."""
start, end = match.span(0)
return match.string[:start]+match.string[end:]
HEADER_TOKEN_RE = re.compile(r"^\s*([^=\s;,]+)")
HEADER_QUOTED_VALUE_RE = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
HEADER_VALUE_RE = re.compile(r"^\s*=\s*([^\s;,]*)")
HEADER_ESCAPE_RE = re.compile(r"\\(.)")
def split_header_words(header_values):
r"""Parse header values into a list of lists containing key,value pairs.
The function knows how to deal with ",", ";" and "=" as well as quoted
values after "=". A list of space separated tokens are parsed as if they
were separated by ";".
If the header_values passed as argument contains multiple values, then they
are treated as if they were a single value separated by comma ",".
This means that this function is useful for parsing header fields that
follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
the requirement for tokens).
headers = #header
header = (token | parameter) *( [";"] (token | parameter))
token = 1*<any CHAR except CTLs or separators>
separators = "(" | ")" | "<" | ">" | "@"
| "," | ";" | ":" | "\" | <">
| "/" | "[" | "]" | "?" | "="
| "{" | "}" | SP | HT
quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
qdtext = <any TEXT except <">>
quoted-pair = "\" CHAR
parameter = attribute "=" value
attribute = token
value = token | quoted-string
Each header is represented by a list of key/value pairs. The value for a
simple token (not part of a parameter) is None. Syntactically incorrect
headers will not necessarily be parsed as you would want.
This is easier to describe with some examples:
>>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
[[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
>>> split_header_words(['text/html; charset="iso-8859-1"'])
[[('text/html', None), ('charset', 'iso-8859-1')]]
>>> split_header_words([r'Basic realm="\"foo\bar\""'])
[[('Basic', None), ('realm', '"foobar"')]]
"""
assert not isinstance(header_values, basestring)
result = []
for text in header_values:
orig_text = text
pairs = []
while text:
m = HEADER_TOKEN_RE.search(text)
if m:
text = unmatched(m)
name = m.group(1)
m = HEADER_QUOTED_VALUE_RE.search(text)
if m: # quoted value
text = unmatched(m)
value = m.group(1)
value = HEADER_ESCAPE_RE.sub(r"\1", value)
else:
m = HEADER_VALUE_RE.search(text)
if m: # unquoted value
text = unmatched(m)
value = m.group(1)
value = value.rstrip()
else:
# no value, a lone token
value = None
pairs.append((name, value))
elif text.lstrip().startswith(","):
# concatenated headers, as per RFC 2616 section 4.2
text = text.lstrip()[1:]
if pairs: result.append(pairs)
pairs = []
else:
# skip junk
non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
assert nr_junk_chars > 0, (
"split_header_words bug: '%s', '%s', %s" %
(orig_text, text, pairs))
text = non_junk
if pairs: result.append(pairs)
return result
HEADER_JOIN_ESCAPE_RE = re.compile(r"([\"\\])")
def join_header_words(lists):
"""Do the inverse (almost) of the conversion done by split_header_words.
Takes a list of lists of (key, value) pairs and produces a single header
value. Attribute values are quoted if needed.
>>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
'text/plain; charset="iso-8859/1"'
>>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
'text/plain, charset="iso-8859/1"'
"""
headers = []
for pairs in lists:
attr = []
for k, v in pairs:
if v is not None:
if not re.search(r"^\w+$", v):
v = HEADER_JOIN_ESCAPE_RE.sub(r"\\\1", v) # escape " and \
v = '"%s"' % v
k = "%s=%s" % (k, v)
attr.append(k)
if attr: headers.append("; ".join(attr))
return ", ".join(headers)
def _strip_quotes(text):
if text.startswith('"'):
text = text[1:]
if text.endswith('"'):
text = text[:-1]
return text
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
"version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
for ii, param in enumerate(re.split(r";\s*", ns_header)):
param = param.rstrip()
if param == "": continue
if "=" not in param:
k, v = param, None
else:
k, v = re.split(r"\s*=\s*", param, 1)
k = k.lstrip()
if ii != 0:
lc = k.lower()
if lc in known_attrs:
k = lc
if k == "version":
# This is an RFC 2109 cookie.
v = _strip_quotes(v)
version_set = True
if k == "expires":
# convert expires date to seconds since epoch
v = http2time(_strip_quotes(v)) # None if invalid
pairs.append((k, v))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
"""Return True if text is a host domain name."""
# XXX
# This may well be wrong. Which RFC is HDN defined in, if any (for
# the purposes of RFC 2965)?
# For the current implementation, what about IPv6? Remember to look
# at other uses of IPV4_RE also, if change this.
if IPV4_RE.search(text):
return False
if text == "":
return False
if text[0] == "." or text[-1] == ".":
return False
return True
def domain_match(A, B):
"""Return True if domain A domain-matches domain B, according to RFC 2965.
A and B may be host domain names or IP addresses.
RFC 2965, section 1:
Host names can be specified either as an IP address or a HDN string.
Sometimes we compare one host name with another. (Such comparisons SHALL
be case-insensitive.) Host A's name domain-matches host B's if
* their host name strings string-compare equal; or
* A is a HDN string and has the form NB, where N is a non-empty
name string, B has the form .B', and B' is a HDN string. (So,
x.y.com domain-matches .Y.com but not Y.com.)
Note that domain-match is not a commutative operation: a.b.c.com
domain-matches .c.com, but not the reverse.
"""
# Note that, if A or B are IP addresses, the only relevant part of the
# definition of the domain-match algorithm is the direct string-compare.
A = A.lower()
B = B.lower()
if A == B:
return True
if not is_HDN(A):
return False
i = A.rfind(B)
if i == -1 or i == 0:
# A does not have form NB, or N is the empty string
return False
if not B.startswith("."):
return False
if not is_HDN(B[1:]):
return False
return True
def liberal_is_HDN(text):
"""Return True if text is a sort-of-like a host domain name.
For accepting/blocking domains.
"""
if IPV4_RE.search(text):
return False
return True
def user_domain_match(A, B):
"""For blocking/accepting domains.
A and B may be host domain names or IP addresses.
"""
A = A.lower()
B = B.lower()
if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
if A == B:
# equal IP addresses
return True
return False
initial_dot = B.startswith(".")
if initial_dot and A.endswith(B):
return True
if not initial_dot and A == B:
return True
return False
cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = urlparse.urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = cut_port_re.sub("", host, 1)
return host.lower()
def eff_request_host(request):
"""Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased.
"""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn
def request_path(request):
"""Path component of request-URI, as defined by RFC 2965."""
url = request.get_full_url()
parts = urlparse.urlsplit(url)
path = escape_path(parts.path)
if not path.startswith("/"):
# fix bad RFC 2396 absoluteURI
path = "/" + path
return path
def request_port(request):
host = request.get_host()
i = host.find(':')
if i >= 0:
port = host[i+1:]
try:
int(port)
except ValueError:
_debug("nonnumeric port: '%s'", port)
return None
else:
port = DEFAULT_HTTP_PORT
return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
return "%%%s" % match.group(1).upper()
def escape_path(path):
"""Escape any invalid characters in HTTP URL, and uppercase all escapes."""
# There's no knowing what character encoding was used to create URLs
# containing %-escapes, but since we have to pick one to escape invalid
# path characters, we pick UTF-8, as recommended in the HTML 4.0
# specification:
# http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
# And here, kind of: draft-fielding-uri-rfc2396bis-03
# (And in draft IRI specification: draft-duerst-iri-05)
# (And here, for new URI schemes: RFC 2718)
if isinstance(path, unicode):
path = path.encode("utf-8")
path = urllib.quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
def reach(h):
"""Return reach of host h, as defined by RFC 2965, section 1.
The reach R of a host name H is defined as follows:
* If
- H is the host domain name of a host; and,
- H has the form A.B; and
- A has no embedded (that is, interior) dots; and
- B has at least one embedded dot, or B is the string "local".
then the reach of H is .B.
* Otherwise, the reach of H is H.
>>> reach("www.acme.com")
'.acme.com'
>>> reach("acme.com")
'acme.com'
>>> reach("acme.local")
'.local'
"""
i = h.find(".")
if i >= 0:
#a = h[:i] # this line is only here to show what a is
b = h[i+1:]
i = b.find(".")
if is_HDN(h) and (i >= 0 or b == "local"):
return "."+b
return h
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
req_host = request_host(request)
if not domain_match(req_host, reach(request.get_origin_req_host())):
return True
else:
return False
class Cookie:
"""HTTP Cookie.
This class represents both Netscape and RFC 2965 cookies.
This is deliberately a very simple class. It just holds attributes. It's
possible to construct Cookie instances that don't comply with the cookie
standards. CookieJar.make_cookies is the factory function for Cookie
objects -- it deals with cookie parsing, supplying defaults, and
normalising to the representation used in this class. CookiePolicy is
responsible for checking them to see whether they should be accepted from
and returned to the server.
Note that the port may be present in the headers, but unspecified ("Port"
rather than"Port=80", for example); if this is the case, port is None.
"""
def __init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False,
):
if version is not None: version = int(version)
if expires is not None: expires = int(expires)
if port is None and port_specified is True:
raise ValueError("if port is None, port_specified must be false")
self.version = version
self.name = name
self.value = value
self.port = port
self.port_specified = port_specified
# normalise case, as per RFC 2965 section 3.3.3
self.domain = domain.lower()
self.domain_specified = domain_specified
# Sigh. We need to know whether the domain given in the
# cookie-attribute had an initial dot, in order to follow RFC 2965
# (as clarified in draft errata). Needed for the returned $Domain
# value.
self.domain_initial_dot = domain_initial_dot
self.path = path
self.path_specified = path_specified
self.secure = secure
self.expires = expires
self.discard = discard
self.comment = comment
self.comment_url = comment_url
self.rfc2109 = rfc2109
self._rest = copy.copy(rest)
def has_nonstandard_attr(self, name):
return name in self._rest
def get_nonstandard_attr(self, name, default=None):
return self._rest.get(name, default)
def set_nonstandard_attr(self, name, value):
self._rest[name] = value
def is_expired(self, now=None):
if now is None: now = time.time()
if (self.expires is not None) and (self.expires <= now):
return True
return False
def __str__(self):
if self.port is None: p = ""
else: p = ":"+self.port
limit = self.domain + p + self.path
if self.value is not None:
namevalue = "%s=%s" % (self.name, self.value)
else:
namevalue = self.name
return "<Cookie %s for %s>" % (namevalue, limit)
def __repr__(self):
args = []
for name in ("version", "name", "value",
"port", "port_specified",
"domain", "domain_specified", "domain_initial_dot",
"path", "path_specified",
"secure", "expires", "discard", "comment", "comment_url",
):
attr = getattr(self, name)
args.append("%s=%s" % (name, repr(attr)))
args.append("rest=%s" % repr(self._rest))
args.append("rfc2109=%s" % repr(self.rfc2109))
return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
"""Defines which cookies get accepted from and returned to server.
May also modify cookies, though this is probably a bad idea.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server."""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies."""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""Constructor arguments should be passed as keyword arguments only."""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override .set_ok(), be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
_debug(" Set-Cookie2 without version attribute (%s=%s)",
cookie.name, cookie.value)
return False
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request.is_unverifiable() and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
_debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
_debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
_debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
_debug(" domain %s is not in user allow-list", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if self.strict_domain and (domain.count(".") >= 2):
# XXX This should probably be compared with the Konqueror
# (kcookiejar.cpp) and Mozilla implementations, but it's a
# losing battle.
i = domain.rfind(".")
j = domain.rfind(".", 0, i)
if j == 0: # domain like .foo.bar
tld = domain[i+1:]
sld = domain[j+1:i]
if sld.lower() in ("co", "ac", "com", "edu", "org", "net",
"gov", "mil", "int", "aero", "biz", "cat", "coop",
"info", "jobs", "mobi", "museum", "name", "pro",
"travel", "eu") and len(tld) == 2:
# domain like .co.uk
_debug(" country-code second level domain %s", domain)
return False
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
_debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
_debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
_debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
_debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
_debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
_debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override .return_ok(), be sure to call this method. If it
returns false, so should your subclass (assuming your subclass wants to
be more strict about which cookies to return).
"""
# Path has already been checked by .path_return_ok(), and domain
# blocking done by .domain_return_ok().
_debug(" - checking cookie %s=%s", cookie.name, cookie.value)
for n in "version", "verifiability", "secure", "expires", "port", "domain":
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
_debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
_debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.is_unverifiable() and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
_debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
_debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
_debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
_debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
_debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
_debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
_debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
_debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
req_host, erhn = eff_request_host(request)
if not req_host.startswith("."):
req_host = "."+req_host
if not erhn.startswith("."):
erhn = "."+erhn
if not (req_host.endswith(domain) or erhn.endswith(domain)):
#_debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
_debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
_debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
_debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
_debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
def deepvalues(mapping):
"""Iterates over nested mapping, depth-first, in sorted order by key."""
values = vals_sorted_by_key(mapping)
for obj in values:
mapping = False
try:
obj.items
except AttributeError:
pass
else:
mapping = True
for subobj in deepvalues(obj):
yield subobj
if not mapping:
yield obj
# Used as second parameter to dict.get() method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try
urllib2.build_opener(HTTPCookieProcessor).open(url).
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
magic_re = r"^\#LWP-Cookies-(\d+\.\d+)"
def __init__(self, policy=None):
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies_lock = _threading.RLock()
self._cookies = {}
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
_debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
_debug(" not returning cookie")
continue
_debug(" it's a match")
cookies.append(cookie)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
cookies.sort(key=lambda arg: len(arg.path), reverse=True)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
"""
_debug("add_cookie_header")
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header(
"Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if (self._policy.rfc2965 and not self._policy.hide_cookie2 and
not request.has_header("Cookie2")):
for cookie in cookies:
if cookie.version != 1:
request.add_unredirected_header("Cookie2", '$Version="1"')
break
finally:
self._cookies_lock.release()
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if k in standard:
# only first value is significant
continue
if k == "domain":
if v is None:
_debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
_debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
_debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
# is a request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ("port", "comment", "commenturl")):
_debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
_debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
rfc2109_as_ns = getattr(self._policy, 'rfc2109_as_netscape', None)
if rfc2109_as_ns is None:
rfc2109_as_ns = not self._policy.rfc2965
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_ns:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object."""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except Exception:
_warn_unhandled_exception()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except Exception:
_warn_unhandled_exception()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return key not in lookup
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so."""
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set."""
c = self._cookies
self._cookies_lock.acquire()
try:
if cookie.domain not in c: c[cookie.domain] = {}
c2 = c[cookie.domain]
if cookie.path not in c2: c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
finally:
self._cookies_lock.release()
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request."""
_debug("extract_cookies: %s", response.info())
self._cookies_lock.acquire()
try:
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
_debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
finally:
self._cookies_lock.release()
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Note that the .save() method won't save session cookies anyway, unless
you ask otherwise by passing a true ignore_discard argument.
"""
self._cookies_lock.acquire()
try:
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the
.save() method won't save expired cookies anyway (unless you ask
otherwise by passing a true ignore_expires argument).
"""
self._cookies_lock.acquire()
try:
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
finally:
self._cookies_lock.release()
def __iter__(self):
return deepvalues(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
# derives from IOError for backwards-compatibility with Python 2.4.0
class LoadError(IOError): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file."""
def __init__(self, filename=None, delayload=False, policy=None):
"""
Cookies are NOT loaded from the named file until either the .load() or
.revert() method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None:
try:
filename+""
except:
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file."""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
self._cookies_lock.acquire()
try:
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
finally:
self._cookies_lock.release()
from _LWPCookieJar import LWPCookieJar, lwp_cookie_str
from _MozillaCookieJar import MozillaCookieJar
|
mit
|
trankmichael/scipy
|
scipy/integrate/setup.py
|
90
|
3250
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
from scipy._build_utils import numpy_nodepr_api
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info
config = Configuration('integrate', parent_package, top_path)
# Get a local copy of lapack_opt_info
lapack_opt = dict(get_info('lapack_opt',notfound_action=2))
# Pop off the libraries list so it can be combined with
# additional required libraries
lapack_libs = lapack_opt.pop('libraries', [])
mach_src = [join('mach','*.f')]
quadpack_src = [join('quadpack','*.f')]
odepack_src = [join('odepack','*.f')]
dop_src = [join('dop','*.f')]
quadpack_test_src = [join('tests','_test_multivariate.c')]
odeint_banded_test_src = [join('tests', 'banded5x5.f')]
config.add_library('mach', sources=mach_src,
config_fc={'noopt':(__file__,1)})
config.add_library('quadpack', sources=quadpack_src)
config.add_library('odepack', sources=odepack_src)
config.add_library('dop', sources=dop_src)
# Extensions
# quadpack:
config.add_extension('_quadpack',
sources=['_quadpackmodule.c'],
libraries=(['quadpack', 'mach'] + lapack_libs),
depends=(['quadpack.h','__quadpack.h']
+ quadpack_src + mach_src),
**lapack_opt)
# odepack
odepack_libs = ['odepack','mach'] + lapack_libs
odepack_opts = lapack_opt.copy()
odepack_opts.update(numpy_nodepr_api)
config.add_extension('_odepack',
sources=['_odepackmodule.c'],
libraries=odepack_libs,
depends=(odepack_src + mach_src),
**odepack_opts)
# vode
config.add_extension('vode',
sources=['vode.pyf'],
libraries=odepack_libs,
depends=(odepack_src
+ mach_src),
**lapack_opt)
# lsoda
config.add_extension('lsoda',
sources=['lsoda.pyf'],
libraries=odepack_libs,
depends=(odepack_src
+ mach_src),
**lapack_opt)
# dop
config.add_extension('_dop',
sources=['dop.pyf'],
libraries=['dop'],
depends=dop_src)
config.add_extension('_test_multivariate',
sources=quadpack_test_src)
# Fortran+f2py extension module for testing odeint.
config.add_extension('_test_odeint_banded',
sources=odeint_banded_test_src,
libraries=odepack_libs,
depends=(odepack_src + mach_src),
**lapack_opt)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
pidydx/grr
|
grr/checks/sysctl_test.py
|
2
|
1498
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for sysctl checks."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib.checks import checks_test_lib
from grr.parsers import linux_sysctl_parser
class SysctlTests(checks_test_lib.HostCheckTest):
@classmethod
def setUpClass(cls):
cls.LoadCheck("sysctl.yaml")
cls.parser = linux_sysctl_parser.ProcSysParser()
def testRPFilter(self):
"""Ensure rp_filter is set to Strict mode.
rp_filter may be set to three values:
0 - Disabled
1 - Strict Reverse Path
2 - Loose Reverse Path
See https://www.kernel.org/doc/Documentation/networking/ip-sysctl.txt
"""
chk_id = "CIS-NET-RP-FILTER"
test_data = {"/proc/sys/net/ipv4/conf/default/rp_filter": "2"}
host_data = self.GenFileData("LinuxProcSysHardeningSettings", test_data,
self.parser)
results = self.RunChecks(host_data)
sym = "Found: System does not perform path filtering."
found = ["net_ipv4_conf_default_rp_filter: 2"]
self.assertCheckDetectedAnom(chk_id, results, sym, found)
test_data = {"/proc/sys/net/ipv4/conf/default/rp_filter": "1"}
host_data = self.GenFileData("LinuxProcSysHardeningSettings", test_data,
self.parser)
results = self.RunChecks(host_data)
self.assertCheckUndetected(chk_id, results)
def main(argv):
test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
flags.StartMain(main)
|
apache-2.0
|
rhurkes/chasegame
|
venv/lib/python2.7/site-packages/pip/index.py
|
45
|
40374
|
"""Routines related to PyPI, indexes"""
import sys
import os
import re
import mimetypes
import posixpath
from pip.log import logger
from pip.util import Inf, normalize_name, splitext, is_prerelease
from pip.exceptions import (DistributionNotFound, BestVersionAlreadyInstalled,
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.backwardcompat import urlparse, url2pathname
from pip.download import PipSession, url_to_path, path_to_url
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
import html5lib, requests, pkg_resources
from requests.exceptions import SSLError
__all__ = ['PackageFinder']
DEFAULT_MIRROR_HOSTNAME = "last.pypi.python.org"
INSECURE_SCHEMES = {
"http": ["https"],
}
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links
"""
def __init__(self, find_links, index_urls,
use_wheel=True, allow_external=[], allow_unverified=[],
allow_all_external=False, allow_all_prereleases=False,
process_dependency_links=False, session=None):
self.find_links = find_links
self.index_urls = index_urls
self.dependency_links = []
self.cache = PageCache()
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.use_wheel = use_wheel
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
self._have_warned_dependency_links = False
# The Session we'll use to make requests
self.session = session or PipSession()
def add_dependency_links(self, links):
## FIXME: this shouldn't be global list this, it should only
## apply to requirements of the package that specifies the
## dependency_links value
## FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
if not self._have_warned_dependency_links:
logger.deprecated(
"1.6",
"Dependency Links processing has been deprecated with an "
"accelerated time schedule and will be removed in pip 1.6",
)
self._have_warned_dependency_links = True
self.dependency_links.extend(links)
def _sort_locations(self, locations):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
is_find_link = url in self.find_links
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if is_find_link and os.path.isdir(path):
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url and os.path.isdir(path):
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _link_sort_key(self, link_tuple):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
parsed_version, link, _ = link_tuple
if self.use_wheel:
support_num = len(supported_tags)
if link == INSTALLED_VERSION:
pri = 1
elif link.ext == wheel_ext:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
if not wheel.supported():
raise UnsupportedWheel("%s is not a supported wheel for this platform. It can't be sorted." % wheel.filename)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (parsed_version, pri)
else:
return parsed_version
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the existing ordering as secondary.
See the docstring for `_link_sort_key` for details.
This function is isolated for easier unit testing.
"""
return sorted(applicable_versions, key=self._link_sort_key, reverse=True)
def find_requirement(self, req, upgrade):
def mkurl_pypi_url(url):
loc = posixpath.join(url, url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
url_name = req.url_name
# Only check main index if index URL is given:
main_index_url = None
if self.index_urls:
# Check that we have the url_name correctly spelled:
main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
# This will also cache the page, so it's okay that we get it again later:
page = self._get_page(main_index_url, req)
if page is None:
url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name
if url_name is not None:
locations = [
mkurl_pypi_url(url)
for url in self.index_urls] + self.find_links
else:
locations = list(self.find_links)
for version in req.absolute_versions:
if url_name is not None and main_index_url is not None:
locations = [
posixpath.join(main_index_url.url, version)] + locations
file_locations, url_locations = self._sort_locations(locations)
_flocations, _ulocations = self._sort_locations(self.dependency_links)
file_locations.extend(_flocations)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
locations = [Link(url, trusted=True) for url in url_locations]
# We explicitly do not trust links that came from dependency_links
locations.extend([Link(url) for url in _ulocations])
logger.debug('URLs to search for versions for %s:' % req)
for location in locations:
logger.debug('* %s' % location)
# Determine if this url used a secure transport mechanism
parsed = urlparse.urlparse(str(location))
if parsed.scheme in INSECURE_SCHEMES:
secure_schemes = INSECURE_SCHEMES[parsed.scheme]
if len(secure_schemes) == 1:
ctx = (location, parsed.scheme, secure_schemes[0],
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using %s if %s has it available" %
ctx)
elif len(secure_schemes) > 1:
ctx = (location, parsed.scheme, ", ".join(secure_schemes),
parsed.netloc)
logger.warn("%s uses an insecure transport scheme (%s). "
"Consider using one of %s if %s has any of "
"them available" % ctx)
else:
ctx = (location, parsed.scheme)
logger.warn("%s uses an insecure transport scheme (%s)." %
ctx)
found_versions = []
found_versions.extend(
self._package_versions(
# We trust every directly linked archive in find_links
[Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
page_versions = []
for page in self._get_pages(locations, req):
logger.debug('Analyzing links from page %s' % page.url)
logger.indent += 2
try:
page_versions.extend(self._package_versions(page.links, req.name.lower()))
finally:
logger.indent -= 2
dependency_versions = list(self._package_versions(
[Link(url) for url in self.dependency_links], req.name.lower()))
if dependency_versions:
logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
file_versions = list(self._package_versions(
[Link(url) for url in file_locations], req.name.lower()))
if not found_versions and not page_versions and not dependency_versions and not file_versions:
logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external %s to allow)." % req.name)
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions at all found for %s' % req)
installed_version = []
if req.satisfied_by is not None:
installed_version = [(req.satisfied_by.parsed_version, INSTALLED_VERSION, req.satisfied_by.version)]
if file_versions:
file_versions.sort(reverse=True)
logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
#this is an intentional priority ordering
all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
applicable_versions = []
for (parsed_version, link, version) in all_versions:
if version not in req.req:
logger.info("Ignoring link %s, version %s doesn't match %s"
% (link, version, ','.join([''.join(s) for s in req.req.specs])))
continue
elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
# If this version isn't the already installed one, then
# ignore it if it's a pre-release.
if link is not INSTALLED_VERSION:
logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
continue
applicable_versions.append((parsed_version, link, version))
applicable_versions = self._sort_versions(applicable_versions)
existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is INSTALLED_VERSION])
if not upgrade and existing_applicable:
if applicable_versions[0][1] is INSTALLED_VERSION:
logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
% req.satisfied_by.version)
else:
logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
% (req.satisfied_by.version, applicable_versions[0][2]))
return None
if not applicable_versions:
logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
% (req, ', '.join([version for parsed_version, link, version in all_versions])))
if self.need_warn_external:
logger.warn("Some externally hosted files were ignored (use "
"--allow-external to allow).")
if self.need_warn_unverified:
logger.warn("Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow)." %
req.name)
raise DistributionNotFound('No distributions matching the version for %s' % req)
if applicable_versions[0][1] is INSTALLED_VERSION:
# We have an existing version, and its the best version
logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
% (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.info('Using version %s (newest of versions: %s)' %
(applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))
selected_version = applicable_versions[0][1]
if (selected_version.internal is not None
and not selected_version.internal):
logger.warn("%s an externally hosted file and may be "
"unreliable" % req.name)
if (selected_version.verifiable is not None
and not selected_version.verifiable):
logger.warn("%s is potentially insecure and "
"unverifiable." % req.name)
if selected_version._deprecated_regex:
logger.deprecated(
"1.7",
"%s discovered using a deprecated method of parsing, "
"in the future it will no longer be discovered" % req.name
)
return selected_version
def _find_url_name(self, index_url, url_name, req):
"""Finds the true URL name of a package, when the given name isn't quite correct.
This is usually used to implement case-insensitivity."""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
## FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url, req)
if page is None:
logger.fatal('Cannot fetch index base URL %s' % index_url)
return
norm_name = normalize_name(req.url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.notify('Real name of requirement %s is %s' % (url_name, base))
return base
return None
def _get_pages(self, locations, req):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links
"""
all_locations = list(locations)
seen = set()
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location, req)
if page is None:
continue
yield page
for link in page.rel_links():
normalized = normalize_name(req.name).lower()
if (not normalized in self.allow_external
and not self.allow_all_external):
self.need_warn_external = True
logger.debug("Not searching %s for files because external "
"urls are disallowed." % link)
continue
if (link.trusted is not None
and not link.trusted
and not normalized in self.allow_unverified):
logger.debug("Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files." % link)
self.need_warn_unverified = True
continue
all_locations.append(link)
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.-]+)', re.I)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates"
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search_name):
for link in self._sort_links(links):
for v in self._link_package_versions(link, search_name):
yield v
def _known_extensions(self):
extensions = ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip')
if self.use_wheel:
return extensions + (wheel_ext,)
return extensions
def _link_package_versions(self, link, search_name):
"""
Return an iterable of triples (pkg_resources_version_key,
link, python_version) that can be extracted from the given
link.
Meant to be overridden by subclasses, not called by clients.
"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
else:
egg_info, ext = link.splitext()
if not ext:
if link not in self.logged_links:
logger.debug('Skipping link %s; not a file' % link)
self.logged_links.add(link)
return []
if egg_info.endswith('.tar'):
# Special double-extension case:
egg_info = egg_info[:-4]
ext = '.tar' + ext
if ext not in self._known_extensions():
if link not in self.logged_links:
logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
self.logged_links.add(link)
return []
if "macosx10" in link.path and ext == '.zip':
if link not in self.logged_links:
logger.debug('Skipping link %s; macosx10 one' % (link))
self.logged_links.add(link)
return []
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
logger.debug('Skipping %s because the wheel filename is invalid' % link)
return []
if wheel.name.lower() != search_name.lower():
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if not wheel.supported():
logger.debug('Skipping %s because it is not compatible with this Python' % link)
return []
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for binary
# wheels on linux that deals with the inherent problems of
# binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if ((
not platform.startswith('win')
and not platform.startswith('macosx')
)
and comes_from is not None
and urlparse.urlparse(comes_from.url).netloc.endswith(
"pypi.python.org")):
if not wheel.supported(tags=supported_tags_noarch):
logger.debug(
"Skipping %s because it is a pypi-hosted binary "
"Wheel on an unsupported platform" % link
)
return []
version = wheel.version
if not version:
version = self._egg_info_matches(egg_info, search_name, link)
if version is None:
logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
return []
if (link.internal is not None
and not link.internal
and not normalize_name(search_name).lower() in self.allow_external
and not self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
logger.debug("Skipping %s because it is externally hosted." % link)
self.need_warn_external = True
return []
if (link.verifiable is not None
and not link.verifiable
and not (normalize_name(search_name).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify it's integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
logger.debug("Skipping %s because it is an insecure and "
"unverifiable file." % link)
self.need_warn_unverified = True
return []
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
logger.debug('Skipping %s because Python version is incorrect' % link)
return []
logger.debug('Found link %s, version: %s' % (link, version))
return [(pkg_resources.parse_version(version),
link,
version)]
def _egg_info_matches(self, egg_info, search_name, link):
match = self._egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s' % link)
return None
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
def _get_page(self, link, req):
return HTMLPage.get_page(link, req,
cache=self.cache,
session=self.session,
)
class PageCache(object):
"""Cache of HTML pages"""
failure_limit = 3
def __init__(self):
self._failures = {}
self._pages = {}
self._archives = {}
def too_many_failures(self, url):
return self._failures.get(url, 0) >= self.failure_limit
def get_page(self, url):
return self._pages.get(url)
def is_archive(self, url):
return self._archives.get(url, False)
def set_is_archive(self, url, value=True):
self._archives[url] = value
def add_page_failure(self, url, level):
self._failures[url] = self._failures.get(url, 0)+level
def add_page(self, urls, page):
for url in urls:
self._pages[url] = page
class HTMLPage(object):
"""Represents one page, along with its URL"""
## FIXME: these regexes are horrible hacks:
_homepage_re = re.compile(r'<th>\s*home\s*page', re.I)
_download_re = re.compile(r'<th>\s*download\s+url', re.I)
_href_re = re.compile('href=(?:"([^"]*)"|\'([^\']*)\'|([^>\\s\\n]*))', re.I|re.S)
def __init__(self, content, url, headers=None, trusted=None):
self.content = content
self.parsed = html5lib.parse(self.content, namespaceHTMLElements=False)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
if session is None:
session = PipSession()
url = link.url
url = url.split('#', 1)[0]
if cache.too_many_failures(url):
return None
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
return None
if cache is not None:
inst = cache.get_page(url)
if inst is not None:
return inst
try:
if skip_archives:
if cache is not None:
if cache.is_archive(url):
return None
filename = link.filename
for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(url,
session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
logger.debug('Getting page %s' % url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if scheme == 'file' and os.path.isdir(url2pathname(path)):
# add trailing slash if not present so urljoin doesn't trim final segment
if not url.endswith('/'):
url += '/'
url = urlparse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s' % url)
resp = session.get(url, headers={"Accept": "text/html"})
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
# redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
# Unless we issue a HEAD request on every url we cannot know
# ahead of time for sure if something is HTML or not. However we
# can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug('Skipping page %s because of Content-Type: %s' %
(link, content_type))
if cache is not None:
cache.set_is_archive(url)
return None
inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(req, link, exc, url, cache=cache, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(
req, link, "connection error: %s" % exc, url,
cache=cache,
)
except requests.Timeout:
cls._handle_fail(req, link, "timed out", url, cache=cache)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(req, link, reason, url,
cache=cache,
level=2,
meth=logger.notify,
)
else:
if cache is not None:
cache.add_page([url, resp.url], inst)
return inst
@staticmethod
def _handle_fail(req, link, reason, url, cache=None, level=1, meth=None):
if meth is None:
meth = logger.info
meth("Could not fetch URL %s: %s", link, reason)
meth("Will skip URL %s when looking for download links for %s" %
(link.url, req))
if cache is not None:
cache.add_page_failure(url, level)
@staticmethod
def _get_content_type(url, session=None):
"""Get the Content-Type of the given url, using a HEAD request"""
if session is None:
session = PipSession()
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
if not scheme in ('http', 'https', 'ftp', 'ftps'):
## FIXME: some warning or something?
## assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@property
def api_version(self):
if not hasattr(self, "_api_version"):
_api_version = None
metas = [x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"]
if metas:
try:
_api_version = int(metas[0].get("value", None))
except (TypeError, ValueError):
_api_version = None
self._api_version = _api_version
return self._api_version
@property
def base_url(self):
if not hasattr(self, "_base_url"):
base = self.parsed.find(".//base")
if base is not None and base.get("href"):
self._base_url = base.get("href")
else:
self._base_url = self.url
return self._base_url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(anchor.get("rel")
and "internal" in anchor.get("rel").split())
yield Link(url, self, internal=internal)
def rel_links(self):
for url in self.explicit_rel_links():
yield url
for url in self.scraped_rel_links():
yield url
def explicit_rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(urlparse.urljoin(self.base_url, href))
yield Link(url, self, trusted=False)
def scraped_rel_links(self):
# Can we get rid of this horrible horrible method?
for regex in (self._homepage_re, self._download_re):
match = regex.search(self.content)
if not match:
continue
href_match = self._href_re.search(self.content, pos=match.end())
if not href_match:
continue
url = href_match.group(1) or href_match.group(2) or href_match.group(3)
if not url:
continue
url = self.clean_link(urlparse.urljoin(self.base_url, url))
yield Link(url, self, trusted=False, _deprecated_regex=True)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None,
_deprecated_regex=False):
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
self._deprecated_regex = _deprecated_regex
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
return self.url == other.url
def __ne__(self, other):
return self.url != other.url
def __lt__(self, other):
return self.url < other.url
def __le__(self, other):
return self.url <= other.url
def __gt__(self, other):
return self.url > other.url
def __ge__(self, other):
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urlparse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urlparse.urlsplit(self.url)[0]
@property
def path(self):
return urlparse.urlsplit(self.url)[2]
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url)
return urlparse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)')
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
# This link is either trusted, or it came from a trusted,
# however it is not operating under the API version 2 so
# we can't make any claims about if it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
def get_requirement_from_url(url):
"""Get a requirement from the URL, if possible. This looks for #egg
in the URL"""
link = Link(url)
egg_info = link.egg_fragment
if not egg_info:
egg_info = splitext(link.filename)[0]
return package_to_requirement(egg_info)
def package_to_requirement(package_name):
"""Translate a name like Foo-1.2 to Foo==1.3"""
match = re.search(r'^(.*?)-(dev|\d.*)', package_name)
if match:
name = match.group(1)
version = match.group(2)
else:
name = package_name
version = ''
if version:
return '%s==%s' % (name, version)
else:
return name
|
mit
|
vtraag/leidenalg
|
tests/test_VertexPartition.py
|
1
|
8491
|
import unittest
import igraph as ig
import leidenalg
import random
from copy import deepcopy
from ddt import ddt, data, unpack
#%%
def name_object(obj, name):
obj.__name__ = name
return obj
graphs = [
###########################################################################
# Zachary karate network
name_object(ig.Graph.Famous('Zachary'),
'Zachary'),
###########################################################################
# ER Networks
# Undirected no loop
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=False, loops=False),
'ER_k1_undirected_no_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=False, loops=False),
'ER_k5_undirected_no_loops'),
# Directed no loop
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=True, loops=False),
'ER_k1_directed_no_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=True, loops=False),
'ER_k5_directed_no_loops'),
# Undirected loops
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=False, loops=True),
'ER_k1_undirected_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=False, loops=True),
'ER_k5_undirected_loops'),
# Directed loops
name_object(ig.Graph.Erdos_Renyi(100, p=1./100, directed=True, loops=True),
'ER_k1_directed_loops'),
name_object(ig.Graph.Erdos_Renyi(100, p=5./100, directed=True, loops=True),
'ER_k5_directed_loops'),
###########################################################################
# Tree
name_object(ig.Graph.Tree(100, 3, type=ig.TREE_UNDIRECTED),
'Tree_undirected'),
name_object(ig.Graph.Tree(100, 3, type=ig.TREE_OUT),
'Tree_directed_out'),
name_object(ig.Graph.Tree(100, 3, type=ig.TREE_IN),
'Tree_directed_in'),
###########################################################################
# Lattice
name_object(ig.Graph.Lattice([100], nei=3, directed=False, mutual=True, circular=True),
'Lattice_undirected'),
name_object(ig.Graph.Lattice([100], nei=3, directed=True, mutual=False, circular=True),
'Lattice_directed')
]
bipartite_graph = name_object(
ig.Graph.Bipartite([0, 0, 0, 0, 1, 1, 1, 1],
[[0, 4],
[0, 5],
[0, 6],
[1, 4],
[1, 5],
[2, 6],
[2, 7],
[3, 6],
[3, 7],
[3, 5]]),
'bipartite_example')
def make_weighted(G):
m = G.ecount()
G.es['weight'] = [random.random() for i in range(G.ecount())]
G.__name__ += '_weighted'
return G
graphs += [make_weighted(H) for H in graphs]
class BaseTest:
@ddt
class MutableVertexPartitionTest(unittest.TestCase):
def setUp(self):
self.optimiser = leidenalg.Optimiser()
@data(*graphs)
def test_move_nodes(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type == leidenalg.SignificanceVertexPartition:
raise unittest.SkipTest('Significance doesn\'t handle weighted graphs')
if 'weight' in graph.es.attributes():
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
for v in range(graph.vcount()):
if graph.degree(v) >= 1:
u = graph.neighbors(v)[0]
diff = partition.diff_move(v, partition.membership[u])
q1 = partition.quality()
partition.move_node(v, partition.membership[u])
q2 = partition.quality()
self.assertAlmostEqual(
q2 - q1,
diff,
places=5,
msg="Difference in quality ({0}) not equal to calculated difference ({1})".format(
q2 - q1, diff))
@data(*graphs)
def test_aggregate_partition(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type != leidenalg.SignificanceVertexPartition:
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
self.optimiser.move_nodes(partition)
aggregate_partition = partition.aggregate_partition()
self.assertAlmostEqual(
partition.quality(),
aggregate_partition.quality(),
places=5,
msg='Quality not equal for aggregate partition.')
self.optimiser.move_nodes(aggregate_partition)
partition.from_coarse_partition(aggregate_partition)
self.assertAlmostEqual(
partition.quality(),
aggregate_partition.quality(),
places=5,
msg='Quality not equal from coarser partition.')
@data(*graphs)
def test_total_weight_in_all_comms(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type != leidenalg.SignificanceVertexPartition:
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
self.optimiser.optimise_partition(partition)
s = sum([partition.total_weight_in_comm(c) for c,_ in enumerate(partition)])
self.assertAlmostEqual(
s,
partition.total_weight_in_all_comms(),
places=5,
msg='Total weight in all communities ({0}) not equal to the sum of the weight in all communities ({1}).'.format(
s, partition.total_weight_in_all_comms())
)
@data(*graphs)
def test_copy(self, graph):
if 'weight' in graph.es.attributes() and self.partition_type != leidenalg.SignificanceVertexPartition:
partition = self.partition_type(graph, weights='weight')
else:
partition = self.partition_type(graph)
self.optimiser.optimise_partition(partition)
partition2 = deepcopy(partition)
self.assertAlmostEqual(
partition.quality(),
partition2.quality(),
places=5,
msg='Quality of deepcopy ({0}) not equal to quality of original partition ({1}).'.format(
partition.quality(), partition2.quality())
)
if (partition2.membership[0] == 0):
partition2.move_node(0, 1)
else:
partition2.move_node(0, 0)
self.assertNotEqual(
partition.membership[0],
partition2.membership[0],
msg='Moving node 0 in the deepcopy to community {0} results in community membership {1} for node 0 also in original partition.'.format(
partition.membership[0], partition2.membership[0])
)
class ModularityVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(ModularityVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.ModularityVertexPartition
class RBERVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(RBERVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.RBERVertexPartition
class RBConfigurationVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(RBConfigurationVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.RBConfigurationVertexPartition
class CPMVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(CPMVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.CPMVertexPartition
def test_Bipartite(self):
graph = bipartite_graph
partition, partition_0, partition_1 = \
leidenalg.CPMVertexPartition.Bipartite(graph, resolution_parameter_01=0.2)
self.optimiser.optimise_partition_multiplex(
[partition, partition_0, partition_1],
layer_weights=[1, -1, -1])
self.assertEqual(len(partition), 1)
class SurpriseVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(SurpriseVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.SurpriseVertexPartition
class SignificanceVertexPartitionTest(BaseTest.MutableVertexPartitionTest):
def setUp(self):
super(SignificanceVertexPartitionTest, self).setUp()
self.partition_type = leidenalg.SignificanceVertexPartition
#%%
if __name__ == '__main__':
#%%
unittest.main(verbosity=3)
suite = unittest.TestLoader().discover('.')
unittest.TextTestRunner(verbosity=1).run(suite)
|
gpl-3.0
|
azverkan/scons
|
test/MSVS/vs-6.0-files.py
|
5
|
2715
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that we can generate Visual Studio 6 project (.dsp) and solution
(.dsw) files that look correct.
"""
import TestSConsMSVS
test = TestSConsMSVS.TestSConsMSVS()
host_arch = test.get_vs_host_arch()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['6.0']
expected_dspfile = TestSConsMSVS.expected_dspfile_6_0
expected_dswfile = TestSConsMSVS.expected_dswfile_6_0
SConscript_contents = TestSConsMSVS.SConscript_contents_6_0
test.write('SConstruct', SConscript_contents%{'HOST_ARCH': host_arch})
test.run(arguments="Test.dsp")
test.must_exist(test.workpath('Test.dsp'))
dsp = test.read('Test.dsp', 'r')
expect = test.msvs_substitute(expected_dspfile, '6.0', None, 'SConstruct')
# don't compare the pickled data
assert dsp[:len(expect)] == expect, test.diff_substr(expect, dsp)
test.must_exist(test.workpath('Test.dsw'))
dsw = test.read('Test.dsw', 'r')
expect = test.msvs_substitute(expected_dswfile, '6.0', None, 'SConstruct')
assert dsw == expect, test.diff_substr(expect, dsw)
test.run(arguments='-c .')
test.must_not_exist(test.workpath('Test.dsp'))
test.must_not_exist(test.workpath('Test.dsw'))
test.run(arguments='Test.dsp')
test.must_exist(test.workpath('Test.dsp'))
test.must_exist(test.workpath('Test.dsw'))
test.run(arguments='-c Test.dsw')
test.must_not_exist(test.workpath('Test.dsp'))
test.must_not_exist(test.workpath('Test.dsw'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
mit
|
wdv4758h/ZipPy
|
lib-python/3/test/test_getopt.py
|
173
|
6968
|
# test_getopt.py
# David Goodger <dgoodger@bigfoot.com> 2000-08-19
from test.support import verbose, run_doctest, run_unittest, EnvironmentVarGuard
import unittest
import getopt
sentinel = object()
class GetoptTests(unittest.TestCase):
def setUp(self):
self.env = EnvironmentVarGuard()
if "POSIXLY_CORRECT" in self.env:
del self.env["POSIXLY_CORRECT"]
def tearDown(self):
self.env.__exit__()
del self.env
def assertError(self, *args, **kwargs):
self.assertRaises(getopt.GetoptError, *args, **kwargs)
def test_short_has_arg(self):
self.assertTrue(getopt.short_has_arg('a', 'a:'))
self.assertFalse(getopt.short_has_arg('a', 'a'))
self.assertError(getopt.short_has_arg, 'a', 'b')
def test_long_has_args(self):
has_arg, option = getopt.long_has_args('abc', ['abc='])
self.assertTrue(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abc'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abcd'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abcd')
self.assertError(getopt.long_has_args, 'abc', ['def'])
self.assertError(getopt.long_has_args, 'abc', [])
self.assertError(getopt.long_has_args, 'abc', ['abcd','abcde'])
def test_do_shorts(self):
opts, args = getopt.do_shorts([], 'a', 'a', [])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a1', 'a:', [])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, [])
#opts, args = getopt.do_shorts([], 'a=1', 'a:', [])
#self.assertEqual(opts, [('-a', '1')])
#self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1'])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_shorts([], 'a', 'a:', ['1', '2'])
self.assertEqual(opts, [('-a', '1')])
self.assertEqual(args, ['2'])
self.assertError(getopt.do_shorts, [], 'a1', 'a', [])
self.assertError(getopt.do_shorts, [], 'a', 'a:', [])
def test_do_longs(self):
opts, args = getopt.do_longs([], 'abc', ['abc'], [])
self.assertEqual(opts, [('--abc', '')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc=1', ['abc='], [])
self.assertEqual(opts, [('--abc', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc=1', ['abcd='], [])
self.assertEqual(opts, [('--abcd', '1')])
self.assertEqual(args, [])
opts, args = getopt.do_longs([], 'abc', ['ab', 'abc', 'abcd'], [])
self.assertEqual(opts, [('--abc', '')])
self.assertEqual(args, [])
# Much like the preceding, except with a non-alpha character ("-") in
# option name that precedes "="; failed in
# http://python.org/sf/126863
opts, args = getopt.do_longs([], 'foo=42', ['foo-bar', 'foo=',], [])
self.assertEqual(opts, [('--foo', '42')])
self.assertEqual(args, [])
self.assertError(getopt.do_longs, [], 'abc=1', ['abc'], [])
self.assertError(getopt.do_longs, [], 'abc', ['abc='], [])
def test_getopt(self):
# note: the empty string between '-a' and '--beta' is significant:
# it simulates an empty string option argument ('-a ""') on the
# command line.
cmdline = ['-a', '1', '-b', '--alpha=2', '--beta', '-a', '3', '-a',
'', '--beta', 'arg1', 'arg2']
opts, args = getopt.getopt(cmdline, 'a:b', ['alpha=', 'beta'])
self.assertEqual(opts, [('-a', '1'), ('-b', ''),
('--alpha', '2'), ('--beta', ''),
('-a', '3'), ('-a', ''), ('--beta', '')])
# Note ambiguity of ('-b', '') and ('-a', '') above. This must be
# accounted for in the code that calls getopt().
self.assertEqual(args, ['arg1', 'arg2'])
self.assertError(getopt.getopt, cmdline, 'a:b', ['alpha', 'beta'])
def test_gnu_getopt(self):
# Test handling of GNU style scanning mode.
cmdline = ['-a', 'arg1', '-b', '1', '--alpha', '--beta=2']
# GNU style
opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
self.assertEqual(args, ['arg1'])
self.assertEqual(opts, [('-a', ''), ('-b', '1'),
('--alpha', ''), ('--beta', '2')])
# recognize "-" as an argument
opts, args = getopt.gnu_getopt(['-a', '-', '-b', '-'], 'ab:', [])
self.assertEqual(args, ['-'])
self.assertEqual(opts, [('-a', ''), ('-b', '-')])
# Posix style via +
opts, args = getopt.gnu_getopt(cmdline, '+ab:', ['alpha', 'beta='])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
# Posix style via POSIXLY_CORRECT
self.env["POSIXLY_CORRECT"] = "1"
opts, args = getopt.gnu_getopt(cmdline, 'ab:', ['alpha', 'beta='])
self.assertEqual(opts, [('-a', '')])
self.assertEqual(args, ['arg1', '-b', '1', '--alpha', '--beta=2'])
def test_libref_examples(self):
s = """
Examples from the Library Reference: Doc/lib/libgetopt.tex
An example using only Unix style options:
>>> import getopt
>>> args = '-a -b -cfoo -d bar a1 a2'.split()
>>> args
['-a', '-b', '-cfoo', '-d', 'bar', 'a1', 'a2']
>>> optlist, args = getopt.getopt(args, 'abc:d:')
>>> optlist
[('-a', ''), ('-b', ''), ('-c', 'foo'), ('-d', 'bar')]
>>> args
['a1', 'a2']
Using long option names is equally easy:
>>> s = '--condition=foo --testing --output-file abc.def -x a1 a2'
>>> args = s.split()
>>> args
['--condition=foo', '--testing', '--output-file', 'abc.def', '-x', 'a1', 'a2']
>>> optlist, args = getopt.getopt(args, 'x', [
... 'condition=', 'output-file=', 'testing'])
>>> optlist
[('--condition', 'foo'), ('--testing', ''), ('--output-file', 'abc.def'), ('-x', '')]
>>> args
['a1', 'a2']
"""
import types
m = types.ModuleType("libreftest", s)
run_doctest(m, verbose)
def test_issue4629(self):
longopts, shortopts = getopt.getopt(['--help='], '', ['help='])
self.assertEqual(longopts, [('--help', '')])
longopts, shortopts = getopt.getopt(['--help=x'], '', ['help='])
self.assertEqual(longopts, [('--help', 'x')])
self.assertRaises(getopt.GetoptError, getopt.getopt, ['--help='], '', ['help'])
def test_main():
run_unittest(GetoptTests)
if __name__ == "__main__":
test_main()
|
bsd-3-clause
|
joshfriend/memegen
|
tests/test_routes_templates.py
|
1
|
2217
|
# pylint: disable=unused-variable
# pylint: disable=misplaced-comparison-constant
from .conftest import load
def describe_get():
def when_default_text(client):
response = client.get("/templates/iw")
assert 200 == response.status_code
assert dict(
name="Insanity Wolf",
description="http://knowyourmeme.com/memes/insanity-wolf",
aliases=['insanity', 'insanity-wolf', 'iw'],
styles=[],
example="http://localhost/iw/does-testing/in-production",
) == load(response)
def when_no_default_text(client):
response = client.get("/templates/keanu")
assert 200 == response.status_code
assert "http://localhost/keanu/your-text/goes-here" == \
load(response)['example']
def when_alternate_sytles_available(client):
response = client.get("/templates/sad-biden")
assert 200 == response.status_code
assert ['down', 'scowl', 'window'] == load(response)['styles']
def when_dashes_in_key(client):
response = client.get("/templates/awkward-awesome")
assert 200 == response.status_code
def it_returns_list_when_no_key(client):
response = client.get("/templates/")
assert 200 == response.status_code
data = load(response)
assert "http://localhost/templates/iw" == data['Insanity Wolf']
assert len(data) >= 20 # there should be many memes
def it_redirects_when_text_is_provided(client):
response = client.get("/templates/iw/top/bottom")
assert 302 == response.status_code
assert '<a href="/iw/top/bottom">' in load(response, as_json=False)
def it_redirects_when_key_is_an_alias(client):
response = client.get("/templates/insanity-wolf")
assert 302 == response.status_code
assert '<a href="/templates/iw">' in load(response, as_json=False)
def describe_post():
def it_returns_an_error(client):
response = client.post("/templates/")
assert 403 == response.status_code
assert dict(
message="https://raw.githubusercontent.com/jacebrowning/memegen/master/CONTRIBUTING.md"
) == load(response)
|
mit
|
markeTIC/OCB
|
addons/account/tests/test_account_move_closed_period.py
|
136
|
1615
|
from datetime import date
from openerp.tests.common import TransactionCase
from openerp.osv.orm import except_orm
class TestPeriodState(TransactionCase):
"""
Forbid creation of Journal Entries for a closed period.
"""
def setUp(self):
super(TestPeriodState, self).setUp()
cr, uid = self.cr, self.uid
self.wizard_period_close = self.registry('account.period.close')
self.wizard_period_close_id = self.wizard_period_close.create(cr, uid, {'sure': 1})
_, self.sale_journal_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "sales_journal")
_, self.period_id = self.registry("ir.model.data").get_object_reference(cr, uid, "account", "period_0")
def test_period_state(self):
cr, uid = self.cr, self.uid
self.wizard_period_close.data_save(cr, uid, [self.wizard_period_close_id], {
'lang': 'en_US',
'active_model': 'account.period',
'active_ids': [self.period_id],
'tz': False,
'active_id': self.period_id
})
with self.assertRaises(except_orm):
self.registry('account.move').create(cr, uid, {
'name': '/',
'period_id': self.period_id,
'journal_id': self.sale_journal_id,
'date': date.today(),
'line_id': [(0, 0, {
'name': 'foo',
'debit': 10,
}), (0, 0, {
'name': 'bar',
'credit': 10,
})]
})
|
agpl-3.0
|
ahmednuaman/django-angular
|
setup.py
|
13
|
1246
|
import os
from setuptools import setup, find_packages
from djangular import __version__
DESCRIPTION = 'Let Django play well with AngularJS'
CLASSIFIERS = [
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
]
def read(fname):
readme_file = os.path.join(os.path.dirname(__file__), fname)
return os.popen('[ -x "$(which pandoc 2>/dev/null)" ] && pandoc -t rst {0} || cat {0}'.format(readme_file)).read()
setup(
name='django-angular',
version=__version__,
author='Jacob Rief',
author_email='jacob.rief@gmail.com',
description=DESCRIPTION,
long_description=read('README.md'),
url='https://github.com/jrief/django-angular',
license='MIT',
keywords=['django', 'angularjs'],
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
packages=find_packages(exclude=['examples', 'docs']),
include_package_data=True,
)
|
mit
|
zahanm/foodpedia
|
django/conf/locale/sk/formats.py
|
232
|
1288
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. F Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
bsd-3-clause
|
Duke-NSOE/GeoHAT
|
GeoHat_V10/Scripts/networkx/tests/benchmark.py
|
37
|
10636
|
from timeit import Timer
# This is gratefully modeled after the benchmarks found in
# the numpy svn repository. http://svn.scipy.org/svn/numpy/trunk
class Benchmark(object):
"""
Benchmark a method or simple bit of code using different Graph classes.
If the test code is the same for each graph class, then you can set it
during instantiation through the argument test_string.
The argument test_string can also be a tuple of test code and setup code.
The code is entered as a string valid for use with the timeit module.
Example:
>>> b=Benchmark(['Graph','XGraph'])
>>> b['Graph']=('G.add_nodes_from(nlist)','nlist=range(100)')
>>> b.run()
"""
def __init__(self,graph_classes,title='',test_string=None,runs=3,reps=1000):
self.runs = runs
self.reps = reps
self.title = title
self.class_tests = dict((gc,'') for gc in graph_classes)
# set up the test string if it is the same for all classes.
if test_string is not None:
if isinstance(test_string,tuple):
self['all']=test_string
else:
self['all']=(test_string,'')
def __setitem__(self,graph_class,some_strs):
"""
Set a simple bit of code and setup string for the test.
Use this for cases where the code differs from one class to another.
"""
test_str, setup_str = some_strs
if graph_class == 'all':
graph_class = self.class_tests.keys()
elif not isinstance(graph_class,list):
graph_class = [graph_class]
for GC in graph_class:
setup_string='import networkx as NX\nG=NX.%s.%s()\n'%\
(GC.lower(),GC) + setup_str
self.class_tests[GC] = Timer(test_str, setup_string)
def run(self):
"""Run the benchmark for each class and print results."""
column_len = max(len(G) for G in self.class_tests)
print('='*72)
if self.title:
print("%s: %s runs, %s reps"% (self.title,self.runs,self.reps))
print('='*72)
times=[]
for GC,timer in self.class_tests.items():
name = GC.ljust(column_len)
try:
t=sum(timer.repeat(self.runs,self.reps))/self.runs
# print "%s: %s" % (name, timer.repeat(self.runs,self.reps))
times.append((t,name))
except Exception as e:
print("%s: Failed to benchmark (%s)." % (name,e))
times.sort()
tmin=times[0][0]
for t,name in times:
print("%s: %5.2f %s" % (name, t/tmin*100.,t))
print('-'*72)
print()
if __name__ == "__main__":
# set up for all routines:
classes=['Graph','MultiGraph','DiGraph','MultiDiGraph']
all_tests=['add_nodes','add_edges','remove_nodes','remove_edges',\
'neighbors','edges','degree','dijkstra','shortest path',\
'subgraph','edgedata_subgraph','laplacian']
# Choose which tests to run
tests=all_tests
tests=['subgraph','edgedata_subgraph']
#tests=all_tests[-1:]
N=100
if 'add_nodes' in tests:
title='Benchmark: Adding nodes'
test_string=('G.add_nodes_from(nlist)','nlist=range(%i)'%N)
b=Benchmark(classes,title,test_string,runs=3,reps=1000)
b.run()
if 'add_edges' in tests:
title='Benchmark: Adding edges'
setup='elist=[(i,i+3) for i in range(%s-3)]\nG.add_nodes_from(range(%i))'%(N,N)
test_string=('G.add_edges_from(elist)',setup)
b=Benchmark(classes,title,test_string,runs=3,reps=1000)
b.run()
if 'remove_nodes' in tests:
title='Benchmark: Adding and Deleting nodes'
setup='nlist=range(%i)'%N
test_string=('G.add_nodes_from(nlist)\nG.remove_nodes_from(nlist)',setup)
b=Benchmark(classes,title,test_string,runs=3,reps=1000)
b.run()
if 'remove_edges' in tests:
title='Benchmark: Adding and Deleting edges'
setup='elist=[(i,i+3) for i in range(%s-3)]'%N
test_string=('G.add_edges_from(elist)\nG.remove_edges_from(elist)',setup)
b=Benchmark(classes,title,test_string,runs=3,reps=1000)
b.run()
if 'neighbors' in tests:
N=500
p=0.3
title='Benchmark: reporting neighbors'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='for n in G:\n for nbr in G.neighbors(n):\n pass'
all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v)\n'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
if 'edges' in tests:
N=500
p=0.3
title='Benchmark: reporting edges'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='for n in G:\n for e in G.edges(n):\n pass'
all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v)\n'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
if 'degree' in tests:
N=500
p=0.3
title='Benchmark: reporting degree'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='for d in G.degree():\n pass'
all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v)\n'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
if 'dijkstra' in tests:
N=500
p=0.3
title='dijkstra single source shortest path'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='p=NX.single_source_dijkstra(G,i)'
all_setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v)'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
if 'shortest path' in tests:
N=500
p=0.3
title='single source shortest path'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='p=NX.single_source_shortest_path(G,i)'
all_setup='i=6\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v)'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
if 'subgraph' in tests:
N=500
p=0.3
title='subgraph method'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='G.subgraph(nlist)'
all_setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v)'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
if 'edgedata_subgraph' in tests:
N=500
p=0.3
title='subgraph method with edge data present'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='G.subgraph(nlist)'
all_setup='nlist=range(100,150)\nH=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v,hi=3)'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)],hi=2)'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v,hi=1)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)],hi=2)'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
if 'laplacian' in tests:
N=500
p=0.3
title='creation of laplacian matrix'
b=Benchmark(classes,title,runs=3,reps=1)
test_string='NX.laplacian(G)'
all_setup='H=NX.binomial_graph(%s,%s)\nfor (u,v) in H.edges_iter():\n '%(N,p)
setup=all_setup+'G.add_edge(u,v)'
if 'Graph' in classes: b['Graph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'DiGraph' in classes: b['DiGraph']=(test_string,setup)
setup=all_setup+'G.add_edge(u,v)'
if 'MultiGraph' in classes: b['MultiGraph']=(test_string,setup)
setup=all_setup+'G.add_edges_from([(u,v),(v,u)])'
if 'MultiDiGraph' in classes: b['MultiDiGraph']=(test_string,setup)
b.run()
|
cc0-1.0
|
aquavitae/rst2pdf
|
rst2pdf/tests/testcases/sphinx-issue284/sphinx/conf.py
|
4
|
8455
|
# -*- coding: utf-8 -*-
#
# Foobar documentation build configuration file, created by
# sphinx-quickstart on Wed Nov 11 10:37:39 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.graphviz', 'rst2pdf.pdfbuilder']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Foobar'
copyright = '2009, Jason S'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Foobardoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('foobar', 'Foobar.tex', 'Foobar Documentation',
'Jason S', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
graphviz_dot='C:/appl/graphics/graphviz/2.24/bin/dot.exe'
# -- Options for PDF output --------------------------------------------------
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# If there is more than one author, separate them with \\.
# For example: r'Guido van Rossum\\Fred L. Drake, Jr., editor'
#
# The options element is a dictionary that lets you override
# this config per-document.
# For example,
# ('index', u'MyProject', u'My Project', u'Author Name',
# dict(pdf_compressed = True))
# would mean that specific document would be compressed
# regardless of the global pdf_compressed setting.
pdf_documents = [
('index', 'index', 'index', 'lorenzo'),
]
# A comma-separated list of custom stylesheets. Example:
pdf_stylesheets = ['sphinx']
# Create a compressed PDF
# Use True/False or 1/0
# Example: compressed=True
#pdf_compressed = False
# A colon-separated list of folders to search for fonts. Example:
# pdf_font_path = ['/usr/share/fonts', '/usr/share/texmf-dist/fonts/']
# Language to be used for hyphenation support
pdf_language = "en_US"
# Mode for literal blocks wider than the frame. Can be
# overflow, shrink or truncate
#pdf_fit_mode = "shrink"
# Section level that forces a break page.
# For example: 1 means top-level sections start in a new page
# 0 means disabled
pdf_break_level = 1
# When a section starts in a new page, force it to be 'even', 'odd',
# or just use 'any'
pdf_breakside = 'odd'
# Insert footnotes where they are defined instead of
# at the end.
#pdf_inline_footnotes = True
# verbosity level. 0 1 or 2
pdf_verbosity = 0
# If false, no index is generated.
pdf_use_index = False
# If false, no modindex is generated.
#pdf_use_modindex = True
# If false, no coverpage is generated.
pdf_use_coverpage = False
# Documents to append as an appendix to all manuals.
#pdf_appendices = []
# Enable experimental feature to split table cells. Use it
# if you get "DelayedTable too big" errors
#pdf_splittables = False
pdf_invariant = True
# Set the date for testing
today = '12/02/2014'
|
mit
|
IEMLdev/ieml-api
|
ieml/usl/decoration/path.py
|
2
|
20479
|
from collections import defaultdict
from itertools import chain
from ieml.commons import OrderedEnum, monitor_decorator
from ieml.dictionary.script import Script
from ieml.usl.constants import FLEXION_SCRIPTS
from ieml.usl.syntagmatic_function import SyntagmaticFunction, SyntagmaticRole
from ieml.usl.usl import USL
from ieml.usl.polymorpheme import PolyMorpheme
from ieml.usl.lexeme import Lexeme
from ieml.usl.variation import PolyMorphemeVariation
from ieml.usl.word import Word
class DeferenceError(KeyError):
pass
SEPARATOR = '>'
def path(string) -> 'UslPath':
from ieml.usl.decoration.parser.parser import PathParser
return PathParser().parse(string)
class UslPath:
USL_TYPE = USL
def __init__(self, child=None):
if child is not None:
if not isinstance(child, UslPath):
raise ValueError("Invalid path child, expected a UslPath, got a "+ child.__class__.__type__)
self.child = child
def _deference(self, usl):
return usl
def has_prefix(self, prefix: 'UslPath'):
if prefix is None:
return True
if self.__class__ != prefix.__class__ or (self.__class__ == prefix.__class__ and not self._do_eq(prefix)):
return self._has_prefix(prefix)
if self.child is None:
return prefix.child is None
return self.child.has_prefix(prefix.child)
def _has_prefix(self, other):
return False
def as_constant(self, u=None):
return UslPath(child=None if self.child is None else self.child.as_constant(u))
def without_morpheme(self):
return UslPath(child=None if self.child is None else self.child.without_morpheme())
def concat(self, suffix: 'UslPath', force: bool=False) -> 'UslPath':
if self.__class__ == UslPath:
return suffix
if suffix.__class__ == UslPath:
return self
if not isinstance(suffix, UslPath):
raise ValueError("Invalid suffix to concat, got "+ suffix.__class__.__name__)
if self.__class__ != suffix.__class__:
if self.child is None:
return self.clone(child=suffix, use_child=True)
else:
return self.clone(child=self.child.concat(suffix, force=force), use_child=True)
else:
if not suffix.has_prefix(self) and not force:
return self.clone()
return suffix
def remove_prefix(self, prefix: 'UslPath'):
if not self.has_prefix(prefix):
return None
if prefix is None:
return self
if not isinstance(self, prefix.__class__) or not self._do_eq(prefix):
return None
if self.child is None:
return None
return self.child.remove_prefix(prefix.child)
@property
def tail(self):
last = self
while last.child is not None:
last = last.child
return last
def deference(self, usl: USL) -> USL:
from ieml.usl.decoration.instance import InstancedUSL
if isinstance(usl, InstancedUSL):
usl = usl.usl
if not isinstance(usl, self.USL_TYPE):
raise DeferenceError("Invalid Usl type for a " + self.__class__.__name__ + \
", expected a " + self.USL_TYPE.__name__ + \
", got a " + usl.__class__.__name__)
node = self._deference(usl)
if self.child is not None:
return self.child.deference(node)
else:
return node
def contained(self, usl):
try:
self.deference(usl)
return True
except DeferenceError:
return False
def _to_str(self):
return ''
def __str__(self):
return SEPARATOR + self._to_str() + (str(self.child) if self.child is not None else '')
@staticmethod
def _from_string(string, children):
return UslPath()
@classmethod
def from_string(cls, string):
if string == SEPARATOR:
return UslPath._from_string(string, None)
split = string.split(SEPARATOR)
return cls._from_string(split[1], split[2:])
def __eq__(self, other):
return isinstance(other, UslPath) and str(other) == str(self)
def __lt__(self, other):
if other.__class__ != self.__class__:
return self.USL_TYPE.syntactic_level < other.USL_TYPE.syntactic_level
else:
return self._do_lt(other)
def _do_eq(self, other):
return True
def _do_lt(self, other):
return False
def __hash__(self):
return hash(str(self))
def clone(self, use_child=False, child=None):
return UslPath(child=(child if use_child else self.child))
def no_child_clone(self):
return self.clone(use_child=True, child=None)
@classmethod
def build_usl_from_path_to_node(cls, path_to_node):
raise NotImplementedError()
@property
def is_constant_path(self):
if self.child is not None:
return self._is_constant_path and self.child.is_constant_path
else:
return self._is_constant_path
@property
def _is_constant_path(self):
return False
def split_tail(self):
if self.child is None:
return (UslPath(), self)
elif self.child.child is None:
return (self.no_child_clone(), self.child)
else:
p, tail = self.child.split_tail()
return (self.clone(use_child=True, child=p), tail)
class GroupIndex(OrderedEnum):
CONSTANT = -1
GROUP_0 = 0
GROUP_1 = 1
GROUP_2 = 2
class PolymorphemePath(UslPath):
USL_TYPE = PolyMorpheme
def __init__(self, group_idx: GroupIndex, morpheme: Script=None, multiplicity=None, child=None):
super().__init__(child=child)
self.group_idx = group_idx
self.morpheme = morpheme
self.multiplicity = multiplicity
@property
def _is_constant_path(self):
return self.group_idx == GroupIndex.CONSTANT
def _do_eq(self, other):
return self.group_idx == other.group_idx and self.morpheme == other.morpheme and \
self.multiplicity == other.multiplicity
def _has_prefix(self, other):
return self.group_idx == other.group_idx and self.multiplicity == other.multiplicity
def as_constant(self, u=None):
return PolymorphemePath(group_idx=GroupIndex.CONSTANT,
morpheme=self.morpheme if self.morpheme is not None else u,
multiplicity=None,
child=(None if self.child is None else self.child.as_constant(u)))
def _do_lt(self, other):
return (self.group_idx, self.morpheme) < (other.group_idx, other.morpheme)
def without_morpheme(self):
return PolymorphemePath(group_idx=self.group_idx,
morpheme=None,
multiplicity=self.multiplicity,
child=(None if self.child is None else self.child.without_morpheme()))
def _deference(self: 'PolymorphemePath', usl: PolyMorpheme):
if self.group_idx != GroupIndex.CONSTANT and self.group_idx.value >= len(usl.groups):
raise DeferenceError("Group index " + str(self.group_idx.name) + " not in polymorpheme")
if self.morpheme is not None:
if self.group_idx == GroupIndex.CONSTANT:
group = usl.constant
else:
group = usl.groups[self.group_idx.value][0]
if not self.morpheme.empty and self.morpheme not in group:
raise DeferenceError("Morpheme " + str(self.morpheme) + " not in group at " + str(self.group_idx.name))
return self.morpheme
else:
if self.group_idx == GroupIndex.CONSTANT:
return PolyMorpheme(constant=usl.constant)
else:
return PolyMorpheme(groups=[usl.groups[self.group_idx.value]])
def _to_str(self):
if self.group_idx == GroupIndex.CONSTANT:
if self.morpheme is not None:
return 'constant' + SEPARATOR + str(self.morpheme)
else:
return 'constant'
else:
if self.morpheme is not None:
return 'group_{}'.format(self.group_idx.value) + \
(' {}'.format(self.multiplicity) if self.multiplicity is not None else '') + \
SEPARATOR + str(self.morpheme)
else:
return 'group_{}'.format(self.group_idx.value) + \
(' {}'.format(self.multiplicity) if self.multiplicity is not None else '')
@staticmethod
def _from_string(elem, children):
if elem == '':
return UslPath()
key = elem
morph = None
if len(children) == 1:
from ieml.usl.usl import usl
morph = usl(children[0])
idx = None
multiplicity = None
if key.startswith('constant'):
idx = GroupIndex.CONSTANT
elif key.startswith('group_'):
if ' ' in key:
key_, multi = key.split(' ')
multiplicity = int(multi)
else:
key_ = key
n = int(''.join(key_[6:]))
if n == 0:
idx = GroupIndex.GROUP_0
elif n == 1:
idx = GroupIndex.GROUP_1
elif n == 2:
idx = GroupIndex.GROUP_2
else:
raise ValueError("Invalid argument index for a PolymorphemePath _from_string constructor: " + str(n))
else:
raise ValueError("Invalid argument for a PolymorphemePath _from_string constructor: " + key)
return PolymorphemePath(group_idx=idx, morpheme=morph, multiplicity=multiplicity)
def clone(self, use_child=False, child=None):
return PolymorphemePath(group_idx=self.group_idx, morpheme=self.morpheme,
multiplicity=self.multiplicity, child=(child if use_child else self.child))
@classmethod
def build_usl_from_path_to_node(cls, path_to_node):
"""
path_to_node: dict PolymorphemePath -> Script
# TODO handle multiplicity
:param path_to_node:
:return:
"""
expend_values = lambda e: [e] if isinstance(e, Script) else e.constant
constant = []
groups = [[[], None],[[], None],[[], None]]
for k, values in path_to_node.items():
if not isinstance(k, PolymorphemePath):
raise ValueError("invalid path type to instantiate a polymorphem " + k.__class__.__name__)
if k.group_idx == GroupIndex.CONSTANT:
constant.extend(chain.from_iterable(map(expend_values, values)))
else:
if k.group_idx == GroupIndex.GROUP_0:
idx = 0
elif k.group_idx == GroupIndex.GROUP_1:
idx = 1
elif k.group_idx == GroupIndex.GROUP_2:
idx = 2
else:
raise ValueError("Invalid polymorpheme path " + str(k))
groups[idx][0].extend(chain.from_iterable(map(expend_values, values)))
if k.multiplicity:
if groups[idx][1] is not None and k.multiplicity != groups[idx][1]:
raise ValueError("Incoherent multiplicity across the path")
groups[idx][1] = k.multiplicity
else:
groups[idx][1] = 1
groups = [g for g in groups if g[0] != []]
return PolyMorpheme(constant=constant, groups=groups)
class FlexionPath(UslPath):
USL_TYPE = PolyMorpheme
def __init__(self, morpheme, child=None):
super().__init__(child=child)
from ieml.dictionary.script import Script
assert isinstance(morpheme, Script), morpheme.__class__.__name__
self.morpheme = morpheme
@property
def _is_constant_path(self):
return True
def as_constant(self, u=None):
return FlexionPath(morpheme=self.morpheme,
child=(None if self.child is None else self.child.as_constant(u)))
def without_morpheme(self):
raise ValueError("Unable to create a flexion path without a morpheme")
# return FlexionPath(morpheme=None,
# child=(None if self.child is None else self.child.without_morpheme()))
def _do_eq(self, other):
return self.morpheme == other.morpheme
def _do_lt(self, other):
return self.morpheme < other.morpheme
def _deference(self: 'FlexionPath', usl: PolyMorpheme):
all_group = [usl.constant, *(list(filter(lambda m: not m.empty, g)) for g, _ in usl.groups)]
all_morphemes = {str(w): w for g in all_group for w in g}
if self.morpheme not in all_morphemes:
raise DeferenceError("Morpheme " + str(self.morpheme) + " not in flexion")
return self.morpheme
def _to_str(self):
return str(self.morpheme)
@staticmethod
def _from_string(elem, children):
if elem == '':
return UslPath()
from ieml.usl.usl import usl
morph = usl(elem)
assert len(children) == 0
return FlexionPath(morpheme=morph)
def clone(self, use_child=False, child=None):
return FlexionPath(morpheme=self.morpheme, child=(child if use_child else self.child))
@classmethod
def build_usl_from_path_to_node(cls, path_to_node):
all_morphemes = []
for k, values in path_to_node.items():
if not isinstance(k, FlexionPath):
raise ValueError("invalid path type to instantiate a polymorpheme " + k.__class__.__name__)
all_morphemes.extend(values)
root_flexion_groups = defaultdict(set)
for m in all_morphemes:
for r in FLEXION_SCRIPTS:
if m in r.singular_sequences_set:
root_flexion_groups[r].add(m)
constant = []
groups = []
for k, v in root_flexion_groups.items():
if len(v) == 0:
continue
elif len(v) == 1:
constant.append(next(iter(v)))
else:
groups.append([list(v), 1])
return PolyMorpheme(constant=constant, groups=groups)
class LexemeIndex(OrderedEnum):
CONTENT = 0
FLEXION = 1
class LexemePath(UslPath):
USL_TYPE = Lexeme
def __init__(self, index: LexemeIndex, child=None):
super().__init__(child=child)
assert isinstance(index, LexemeIndex)
self.index = index
if child is not None and not child.__class__ == UslPath:
if self.index == LexemeIndex.CONTENT:
assert isinstance(self.child, PolymorphemePath), \
"Invalid path structure, a lexeme content child must be a PolymorphemePath, not a " + self.child.__class__.__name__
else:
assert isinstance(self.child, FlexionPath), \
"Invalid path structure, a lexeme flexion child must be a FlexionPath, not a " + self.child.__class__.__name__
def as_constant(self, u=None):
return LexemePath(index=self.index,
child=(None if self.child is None else self.child.as_constant(u)))
def without_morpheme(self):
return LexemePath(index=self.index,
child=(None if self.child is None or self.index == LexemeIndex.FLEXION else self.child.without_morpheme()))
@property
def _is_constant_path(self):
return True
def _do_eq(self, other):
return self.index == other.index
def _do_lt(self, other):
return self.index < other.index
def _deference(self: 'LexemePath', usl: Lexeme):
if self.index == LexemeIndex.CONTENT:
return usl.pm_content
else:
return usl.pm_flexion
def _to_str(self):
if self.index == LexemeIndex.CONTENT:
return 'content'
else:
return 'flexion'
@staticmethod
def _from_string(elem, children):
if elem == '':
return UslPath()
idx = None
if elem == 'content':
idx = LexemeIndex.CONTENT
elif elem == 'flexion':
idx = LexemeIndex.FLEXION
else:
raise ValueError("Invalid argument for a LexemePath _from_string constructor: " + elem)
child = None
if len(children) != 0:
if idx == LexemeIndex.CONTENT:
child = PolymorphemePath._from_string(children[0], children[1:])
else:
child = FlexionPath._from_string(children[0], children[1:])
return LexemePath(index=idx, child=child)
def clone(self, use_child=False, child=None):
if child is not None:
if self.index == LexemeIndex.CONTENT:
if not isinstance(child, PolymorphemePath):
raise ValueError("Can't cast to polymorpheme path")
else:
if isinstance(child, PolymorphemePath):
if child.morpheme is not None:
child = FlexionPath(morpheme=child.morpheme)
else:
child = None
elif not isinstance(child, FlexionPath):
raise ValueError("Can't cast to flexion path")
return LexemePath(index=self.index, child=(child if use_child else self.child))
@classmethod
def build_usl_from_path_to_node(cls, path_to_node):
pm_content = None
pm_flexion = None
for k, v in path_to_node.items():
if not isinstance(k, LexemePath):
raise ValueError("invalid path type to instantiate a Lexeme " + k.__class__.__name__)
if k.index == LexemeIndex.CONTENT:
if pm_content is not None:
raise ValueError("Multiple candidates for the content of the Lexeme")
if not isinstance(v, PolyMorpheme):
pm_content = PolyMorpheme(constant=v)
else:
pm_content = v
elif k.index == LexemeIndex.FLEXION:
if pm_flexion is not None:
raise ValueError("Multiple candidates for the flexion of the Lexeme")
if not isinstance(v, PolyMorpheme):
pm_flexion = FlexionPath.build_usl_from_path_to_node({FlexionPath(morpheme=ss): [ss] for ss in v})
else:
pm_flexion = v
if pm_content is None:
pm_content = PolyMorpheme([])
if pm_flexion is None:
pm_flexion = PolyMorpheme([])
return Lexeme(pm_content=pm_content, pm_flexion=pm_flexion)
class RolePath(UslPath):
USL_TYPE = Word
def __init__(self, role, has_focus=False, child=None):
super().__init__(child)
from ieml.usl.syntagmatic_function import SyntagmaticRole
if not isinstance(role, SyntagmaticRole):
raise DeferenceError("Invalid role for a RolePath " + role.__class__.__name__)
self.role = role
if child is not None and not child.__class__ == UslPath:
if not isinstance(child, LexemePath):
raise DeferenceError("Invalid path structure, a lexeme content child must be a PolymorphemePath, not a " + self.child.__class__.__name__)
self.has_focus = has_focus
def as_constant(self, u=None):
return RolePath(role=self.role,
has_focus=self.has_focus,
child=(None if self.child is None else self.child.as_constant(u)))
def without_morpheme(self):
return RolePath(role=self.role,
has_focus=self.has_focus,
child=(None if self.child is None else self.child.without_morpheme()))
@property
def _is_constant_path(self):
return True
def _do_eq(self, other):
return self.role == other.role and self.has_focus == other.has_focus
def _do_lt(self, other):
return self.role < other.role
def _deference(self: 'RolePath', usl: Word):
try:
return usl.syntagmatic_fun.get(self.role, ignore_prefix=True, ignore_process_valence=True)
except KeyError as key:
raise DeferenceError(key)
def _to_str(self):
return 'role' + SEPARATOR + ('! ' if self.has_focus else '') + str(self.role)
# def __hash__(self):
# """Ignore focus when used as key"""
# return hash( 'role' + SEPARATOR + str(self.role))
@staticmethod
def _from_string(elem, children):
from ieml.usl.syntagmatic_function import SyntagmaticRole
if elem == '':
if len(children) == 0:
return UslPath()
raise ValueError("Empty role in RolePath")
from ieml.usl.usl import usl
sfun_role = SyntagmaticRole([usl(s) for s in elem.split(' ') if s != '!'])
child = None
if len(children) != 0:
child = LexemePath._from_string(children[0], children[1:])
return RolePath(role=sfun_role, has_focus='!' in elem,child=child)
def clone(self, use_child=False, child=None):
return RolePath(role=self.role,
has_focus=self.has_focus, # if use_child and child is not None else False,
child=(child if use_child else self.child))
@classmethod
def build_usl_from_path_to_node(cls, path_to_node):
focus_role = None
for k, v in path_to_node.items():
if not isinstance(k, RolePath):
raise ValueError("invalid path type to instantiate a Word " + k.__class__.__name__)
if k.has_focus:
if focus_role is not None and focus_role != k.role:
raise ValueError("Incoherent focus role, multiple differents values")
focus_role = k
if focus_role is None:
raise ValueError("No focus role defined in this word")
path_to_node_grouped = defaultdict(list)
for k, v in path_to_node.items():
if v is not None:
if focus_role.role == k.role and not k.has_focus:
path_to_node_grouped[focus_role].append(v)
else:
path_to_node_grouped[k].append(v)
path_to_node_res = {}
for k, v in path_to_node_grouped.items():
if len(v) != 1:
raise ValueError('Two many lexemes at role: ' + str(k))
path_to_node_res[k] = v[0]
lex_list = [(k.role.constant, v) for k, v in path_to_node_res.items()]
ctx_type, sfun = SyntagmaticFunction.from_list(lex_list)
return Word(syntagmatic_fun=sfun,
role=focus_role.role,
context_type=ctx_type)
# @monitor_decorator('usl_from_path_values')
def usl_from_path_values(paths_values):
from ieml.usl.decoration.parser.parser import PathParser
from ieml.usl.parser import IEMLParser
path_parser = PathParser()
usl_parser = IEMLParser()
path_to_value = {path_parser.parse(p): set() for p, _ in paths_values}
for p, v in paths_values:
path_to_value[path_parser.parse(p)].add(usl_parser.parse(v))
Tree = lambda: defaultdict(Tree)
bins = Tree()
def recursive_group_by(bin, path, values):
p_cloned = path.no_child_clone()
if 'type' in bin:
if not isinstance(path, bin['type']):
raise ValueError("Inconsistent path system")
else:
bin['type'] = path.__class__
if path.child is None:
bin[p_cloned]["node"] = values
else:
recursive_group_by(bin[p_cloned], path.child, values)
def build_nodes(bin):
if 'node' not in bin:
path_to_node = {}
for p, bin_child in bin.items():
if isinstance(p, UslPath):
path_to_node[p] = build_nodes(bin_child)
assert 'type' in bin
bin['node'] = bin['type'].build_usl_from_path_to_node(path_to_node)
return bin['node']
for p, values in path_to_value.items():
recursive_group_by(bins, p, list(values))
return build_nodes(bins)
|
gpl-3.0
|
Suninus/zulip
|
analytics/management/commands/user_stats.py
|
113
|
1609
|
from __future__ import absolute_import
import datetime
import pytz
from django.core.management.base import BaseCommand
from zerver.models import UserProfile, Realm, Stream, Message
class Command(BaseCommand):
help = "Generate statistics on user activity."
def add_arguments(self, parser):
parser.add_argument('realms', metavar='<realm>', type=str, nargs='*',
help="realm to generate statistics for")
def messages_sent_by(self, user, week):
start = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=(week + 1)*7)
end = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(days=week*7)
return Message.objects.filter(sender=user, pub_date__gt=start, pub_date__lte=end).count()
def handle(self, *args, **options):
if options['realms']:
try:
realms = [Realm.objects.get(domain=domain) for domain in options['realms']]
except Realm.DoesNotExist, e:
print e
exit(1)
else:
realms = Realm.objects.all()
for realm in realms:
print realm.domain
user_profiles = UserProfile.objects.filter(realm=realm, is_active=True)
print "%d users" % (len(user_profiles),)
print "%d streams" % (len(Stream.objects.filter(realm=realm)),)
for user_profile in user_profiles:
print "%35s" % (user_profile.email,),
for week in range(10):
print "%5d" % (self.messages_sent_by(user_profile, week)),
print ""
|
apache-2.0
|
DashaChuk/PyMySQL
|
pymysql/cursors.py
|
7
|
15432
|
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
import re
import warnings
from ._compat import range_type, text_type, PY2
from . import err
#: Regular expression for :meth:`Cursor.executemany`.
#: executemany only suports simple bulk insert.
#: You can use it to load large dataset.
RE_INSERT_VALUES = re.compile(r"""(INSERT\s.+\sVALUES\s+)(\(\s*%s\s*(?:,\s*%s\s*)*\))(\s*(?:ON DUPLICATE.*)?)\Z""",
re.IGNORECASE | re.DOTALL)
class Cursor(object):
'''
This is the object you use to interact with the database.
'''
#: Max stetement size which :meth:`executemany` generates.
#:
#: Max size of allowed statement is max_allowed_packet - packet_header_size.
#: Default value of max_allowed_packet is 1048576.
max_stmt_length = 1024000
def __init__(self, connection):
'''
Do not create an instance of a Cursor yourself. Call
connections.Connection.cursor().
'''
self.connection = connection
self.description = None
self.rownumber = 0
self.rowcount = -1
self.arraysize = 1
self._executed = None
self._result = None
self._rows = None
def close(self):
'''
Closing a cursor just exhausts all remaining data.
'''
conn = self.connection
if conn is None:
return
try:
while self.nextset():
pass
finally:
self.connection = None
def __enter__(self):
return self
def __exit__(self, *exc_info):
del exc_info
self.close()
def _get_db(self):
if not self.connection:
raise err.ProgrammingError("Cursor closed")
return self.connection
def _check_executed(self):
if not self._executed:
raise err.ProgrammingError("execute() first")
def _conv_row(self, row):
return row
def setinputsizes(self, *args):
"""Does nothing, required by DB API."""
def setoutputsizes(self, *args):
"""Does nothing, required by DB API."""
def _nextset(self, unbuffered=False):
"""Get the next query set"""
conn = self._get_db()
current_result = self._result
if current_result is None or current_result is not conn._result:
return None
if not current_result.has_next:
return None
conn.next_result(unbuffered=unbuffered)
self._do_get_result()
return True
def nextset(self):
return self._nextset(False)
def _escape_args(self, args, conn):
if isinstance(args, (tuple, list)):
return tuple(conn.escape(arg) for arg in args)
elif isinstance(args, dict):
return dict((key, conn.escape(val)) for (key, val) in args.items())
else:
#If it's not a dictionary let's try escaping it anyways.
#Worst case it will throw a Value error
return conn.escape(args)
def mogrify(self, query, args=None):
"""
Returns the exact string that is sent to the database by calling the
execute() method.
This method follows the extension to the DB API 2.0 followed by Psycopg.
"""
conn = self._get_db()
if PY2: # Use bytes on Python 2 always
encoding = conn.encoding
def ensure_bytes(x):
if isinstance(x, unicode):
x = x.encode(encoding)
return x
query = ensure_bytes(query)
if args is not None:
if isinstance(args, (tuple, list)):
args = tuple(map(ensure_bytes, args))
elif isinstance(args, dict):
args = dict((ensure_bytes(key), ensure_bytes(val)) for (key, val) in args.items())
else:
args = ensure_bytes(args)
if args is not None:
query = query % self._escape_args(args, conn)
return query
def execute(self, query, args=None):
'''Execute a query'''
while self.nextset():
pass
query = self.mogrify(query, args)
result = self._query(query)
self._executed = query
return result
def executemany(self, query, args):
"""Run several data against one query
PyMySQL can execute bulkinsert for query like 'INSERT ... VALUES (%s)'.
In other form of queries, just run :meth:`execute` many times.
"""
if not args:
return
m = RE_INSERT_VALUES.match(query)
if m:
q_prefix = m.group(1)
q_values = m.group(2).rstrip()
q_postfix = m.group(3) or ''
assert q_values[0] == '(' and q_values[-1] == ')'
return self._do_execute_many(q_prefix, q_values, q_postfix, args,
self.max_stmt_length,
self._get_db().encoding)
self.rowcount = sum(self.execute(query, arg) for arg in args)
return self.rowcount
def _do_execute_many(self, prefix, values, postfix, args, max_stmt_length, encoding):
conn = self._get_db()
escape = self._escape_args
if isinstance(prefix, text_type):
prefix = prefix.encode(encoding)
if isinstance(postfix, text_type):
postfix = postfix.encode(encoding)
sql = bytearray(prefix)
args = iter(args)
v = values % escape(next(args), conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
sql += v
rows = 0
for arg in args:
v = values % escape(arg, conn)
if isinstance(v, text_type):
if PY2:
v = v.encode(encoding)
else:
v = v.encode(encoding, 'surrogateescape')
if len(sql) + len(v) + len(postfix) + 1 > max_stmt_length:
rows += self.execute(sql + postfix)
sql = bytearray(prefix)
else:
sql += b','
sql += v
rows += self.execute(sql + postfix)
self.rowcount = rows
return rows
def callproc(self, procname, args=()):
"""Execute stored procedure procname with args
procname -- string, name of procedure to execute on server
args -- Sequence of parameters to use with procedure
Returns the original args.
Compatibility warning: PEP-249 specifies that any modified
parameters must be returned. This is currently impossible
as they are only available by storing them in a server
variable and then retrieved by a query. Since stored
procedures return zero or more result sets, there is no
reliable way to get at OUT or INOUT parameters via callproc.
The server variables are named @_procname_n, where procname
is the parameter above and n is the position of the parameter
(from zero). Once all result sets generated by the procedure
have been fetched, you can issue a SELECT @_procname_0, ...
query using .execute() to get any OUT or INOUT values.
Compatibility warning: The act of calling a stored procedure
itself creates an empty result set. This appears after any
result sets generated by the procedure. This is non-standard
behavior with respect to the DB-API. Be sure to use nextset()
to advance through all result sets; otherwise you may get
disconnected.
"""
conn = self._get_db()
for index, arg in enumerate(args):
q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg))
self._query(q)
self.nextset()
q = "CALL %s(%s)" % (procname,
','.join(['@_%s_%d' % (procname, i)
for i in range_type(len(args))]))
self._query(q)
self._executed = q
return args
def fetchone(self):
''' Fetch the next row '''
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result
def fetchmany(self, size=None):
''' Fetch several rows '''
self._check_executed()
if self._rows is None:
return ()
end = self.rownumber + (size or self.arraysize)
result = self._rows[self.rownumber:end]
self.rownumber = min(end, len(self._rows))
return result
def fetchall(self):
''' Fetch all the rows '''
self._check_executed()
if self._rows is None:
return ()
if self.rownumber:
result = self._rows[self.rownumber:]
else:
result = self._rows
self.rownumber = len(self._rows)
return result
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
r = self.rownumber + value
elif mode == 'absolute':
r = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
if not (0 <= r < len(self._rows)):
raise IndexError("out of range")
self.rownumber = r
def _query(self, q):
conn = self._get_db()
self._last_executed = q
conn.query(q)
self._do_get_result()
return self.rowcount
def _do_get_result(self):
conn = self._get_db()
self.rownumber = 0
self._result = result = conn._result
self.rowcount = result.affected_rows
self.description = result.description
self.lastrowid = result.insert_id
self._rows = result.rows
if result.warning_count > 0:
self._show_warnings(conn)
def _show_warnings(self, conn):
if self._result and self._result.has_next:
return
ws = conn.show_warnings()
if ws is None:
return
for w in ws:
msg = w[-1]
if PY2:
if isinstance(msg, unicode):
msg = msg.encode('utf-8', 'replace')
warnings.warn(str(msg), err.Warning, 4)
def __iter__(self):
return iter(self.fetchone, None)
Warning = err.Warning
Error = err.Error
InterfaceError = err.InterfaceError
DatabaseError = err.DatabaseError
DataError = err.DataError
OperationalError = err.OperationalError
IntegrityError = err.IntegrityError
InternalError = err.InternalError
ProgrammingError = err.ProgrammingError
NotSupportedError = err.NotSupportedError
class DictCursorMixin(object):
# You can override this to use OrderedDict or other dict-like types.
dict_type = dict
def _do_get_result(self):
super(DictCursorMixin, self)._do_get_result()
fields = []
if self.description:
for f in self._result.fields:
name = f.name
if name in fields:
name = f.table_name + '.' + name
fields.append(name)
self._fields = fields
if fields and self._rows:
self._rows = [self._conv_row(r) for r in self._rows]
def _conv_row(self, row):
if row is None:
return None
return self.dict_type(zip(self._fields, row))
class DictCursor(DictCursorMixin, Cursor):
"""A cursor which returns results as a dictionary"""
class SSCursor(Cursor):
"""
Unbuffered Cursor, mainly useful for queries that return a lot of data,
or for connections to remote servers over a slow network.
Instead of copying every row of data into a buffer, this will fetch
rows as needed. The upside of this, is the client uses much less memory,
and rows are returned much faster when traveling over a slow network,
or if the result set is very big.
There are limitations, though. The MySQL protocol doesn't support
returning the total number of rows, so the only way to tell how many rows
there are is to iterate over every row returned. Also, it currently isn't
possible to scroll backwards, as only the current row is held in memory.
"""
def _conv_row(self, row):
return row
def close(self):
conn = self.connection
if conn is None:
return
if self._result is not None and self._result is conn._result:
self._result._finish_unbuffered_query()
try:
while self.nextset():
pass
finally:
self.connection = None
def _query(self, q):
conn = self._get_db()
self._last_executed = q
conn.query(q, unbuffered=True)
self._do_get_result()
return self.rowcount
def nextset(self):
return self._nextset(unbuffered=True)
def read_next(self):
""" Read next row """
return self._conv_row(self._result._read_rowdata_packet_unbuffered())
def fetchone(self):
""" Fetch next row """
self._check_executed()
row = self.read_next()
if row is None:
return None
self.rownumber += 1
return row
def fetchall(self):
"""
Fetch all, as per MySQLdb. Pretty useless for large queries, as
it is buffered. See fetchall_unbuffered(), if you want an unbuffered
generator version of this method.
"""
return list(self.fetchall_unbuffered())
def fetchall_unbuffered(self):
"""
Fetch all, implemented as a generator, which isn't to standard,
however, it doesn't make sense to return everything in a list, as that
would use ridiculous memory for large result sets.
"""
return iter(self.fetchone, None)
def __iter__(self):
return self.fetchall_unbuffered()
def fetchmany(self, size=None):
""" Fetch many """
self._check_executed()
if size is None:
size = self.arraysize
rows = []
for i in range_type(size):
row = self.read_next()
if row is None:
break
rows.append(row)
self.rownumber += 1
return rows
def scroll(self, value, mode='relative'):
self._check_executed()
if mode == 'relative':
if value < 0:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
for _ in range_type(value):
self.read_next()
self.rownumber += value
elif mode == 'absolute':
if value < self.rownumber:
raise err.NotSupportedError(
"Backwards scrolling not supported by this cursor")
end = value - self.rownumber
for _ in range_type(end):
self.read_next()
self.rownumber = value
else:
raise err.ProgrammingError("unknown scroll mode %s" % mode)
class SSDictCursor(DictCursorMixin, SSCursor):
""" An unbuffered cursor, which returns results as a dictionary """
|
mit
|
bitemyapp/pgcli
|
tests/utils.py
|
17
|
2002
|
import pytest
import psycopg2
import psycopg2.extras
from pgcli.main import format_output
from pgcli.pgexecute import register_json_typecasters
# TODO: should this be somehow be divined from environment?
POSTGRES_USER, POSTGRES_HOST = 'postgres', 'localhost'
def db_connection(dbname=None):
conn = psycopg2.connect(user=POSTGRES_USER, host=POSTGRES_HOST, database=dbname)
conn.autocommit = True
return conn
try:
conn = db_connection()
CAN_CONNECT_TO_DB = True
SERVER_VERSION = conn.server_version
json_types = register_json_typecasters(conn, lambda x: x)
JSON_AVAILABLE = 'json' in json_types
JSONB_AVAILABLE = 'jsonb' in json_types
except:
CAN_CONNECT_TO_DB = JSON_AVAILABLE = JSONB_AVAILABLE = False
SERVER_VERSION = 0
dbtest = pytest.mark.skipif(
not CAN_CONNECT_TO_DB,
reason="Need a postgres instance at localhost accessible by user 'postgres'")
requires_json = pytest.mark.skipif(
not JSON_AVAILABLE,
reason='Postgres server unavailable or json type not defined')
requires_jsonb = pytest.mark.skipif(
not JSONB_AVAILABLE,
reason='Postgres server unavailable or jsonb type not defined')
def create_db(dbname):
with db_connection().cursor() as cur:
try:
cur.execute('''CREATE DATABASE _test_db''')
except:
pass
def drop_tables(conn):
with conn.cursor() as cur:
cur.execute('''
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
DROP SCHEMA IF EXISTS schema1 CASCADE;
DROP SCHEMA IF EXISTS schema2 CASCADE''')
def run(executor, sql, join=False, expanded=False, pgspecial=None):
" Return string output for the sql to be run "
result = []
for title, rows, headers, status in executor.run(sql, pgspecial):
result.extend(format_output(title, rows, headers, status, 'psql',
expanded=expanded))
if join:
result = '\n'.join(result)
return result
|
bsd-3-clause
|
victormlourenco/red_kernel_lge_v500
|
Documentation/networking/cxacru-cf.py
|
14668
|
1626
|
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
|
gpl-2.0
|
kwlzn/pants
|
src/python/pants/backend/jvm/tasks/jvm_compile/execution_graph.py
|
23
|
11428
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import Queue as queue
import threading
import traceback
from collections import defaultdict, deque
from heapq import heappop, heappush
from pants.base.worker_pool import Work
class Job(object):
"""A unit of scheduling for the ExecutionGraph.
The ExecutionGraph represents a DAG of dependent work. A Job a node in the graph along with the
keys of its dependent jobs.
"""
def __init__(self, key, fn, dependencies, size=0, on_success=None, on_failure=None):
"""
:param key: Key used to reference and look up jobs
:param fn callable: The work to perform
:param dependencies: List of keys for dependent jobs
:param size: Estimated job size used for prioritization
:param on_success: Zero parameter callback to run if job completes successfully. Run on main
thread.
:param on_failure: Zero parameter callback to run if job completes successfully. Run on main
thread."""
self.key = key
self.fn = fn
self.dependencies = dependencies
self.size = size
self.on_success = on_success
self.on_failure = on_failure
def __call__(self):
self.fn()
def run_success_callback(self):
if self.on_success:
self.on_success()
def run_failure_callback(self):
if self.on_failure:
self.on_failure()
UNSTARTED = 'Unstarted'
QUEUED = 'Queued'
SUCCESSFUL = 'Successful'
FAILED = 'Failed'
CANCELED = 'Canceled'
class StatusTable(object):
DONE_STATES = {SUCCESSFUL, FAILED, CANCELED}
def __init__(self, keys, pending_dependencies_count):
self._statuses = {key: UNSTARTED for key in keys}
self._pending_dependencies_count = pending_dependencies_count
def mark_as(self, state, key):
self._statuses[key] = state
def mark_queued(self, key):
self.mark_as(QUEUED, key)
def unfinished_items(self):
"""Returns a list of (name, status) tuples, only including entries marked as unfinished."""
return [(key, stat) for key, stat in self._statuses.items() if stat not in self.DONE_STATES]
def failed_keys(self):
return [key for key, stat in self._statuses.items() if stat == FAILED]
def is_unstarted(self, key):
return self._statuses.get(key) is UNSTARTED
def mark_one_successful_dependency(self, key):
self._pending_dependencies_count[key] -= 1
def is_ready_to_submit(self, key):
return self.is_unstarted(key) and self._pending_dependencies_count[key] == 0
def are_all_done(self):
return all(s in self.DONE_STATES for s in self._statuses.values())
def has_failures(self):
return any(stat is FAILED for stat in self._statuses.values())
class ExecutionFailure(Exception):
"""Raised when work units fail during execution"""
def __init__(self, message, cause=None):
if cause:
message = "{}: {}".format(message, str(cause))
super(ExecutionFailure, self).__init__(message)
self.cause = cause
class UnexecutableGraphError(Exception):
"""Base exception class for errors that make an ExecutionGraph not executable"""
def __init__(self, msg):
super(UnexecutableGraphError, self).__init__("Unexecutable graph: {}".format(msg))
class NoRootJobError(UnexecutableGraphError):
def __init__(self):
super(NoRootJobError, self).__init__(
"All scheduled jobs have dependencies. There must be a circular dependency.")
class UnknownJobError(UnexecutableGraphError):
def __init__(self, undefined_dependencies):
super(UnknownJobError, self).__init__("Undefined dependencies {}"
.format(", ".join(map(repr, undefined_dependencies))))
class JobExistsError(UnexecutableGraphError):
def __init__(self, key):
super(JobExistsError, self).__init__("Job already scheduled {!r}"
.format(key))
class ThreadSafeCounter(object):
def __init__(self):
self.lock = threading.Lock()
self._counter = 0
def get(self):
with self.lock:
return self._counter
def increment(self):
with self.lock:
self._counter += 1
def decrement(self):
with self.lock:
self._counter -= 1
class ExecutionGraph(object):
"""A directed acyclic graph of work to execute.
This is currently only used within jvm compile, but the intent is to unify it with the future
global execution graph.
"""
def __init__(self, job_list):
"""
:param job_list Job: list of Jobs to schedule and run.
"""
self._dependencies = defaultdict(list)
self._dependees = defaultdict(list)
self._jobs = {}
self._job_keys_as_scheduled = []
self._job_keys_with_no_dependencies = []
for job in job_list:
self._schedule(job)
unscheduled_dependencies = set(self._dependees.keys()) - set(self._job_keys_as_scheduled)
if unscheduled_dependencies:
raise UnknownJobError(unscheduled_dependencies)
if len(self._job_keys_with_no_dependencies) == 0:
raise NoRootJobError()
self._job_priority = self._compute_job_priorities(job_list)
def format_dependee_graph(self):
return "\n".join([
"{} -> {{\n {}\n}}".format(key, ',\n '.join(self._dependees[key]))
for key in self._job_keys_as_scheduled
])
def _schedule(self, job):
key = job.key
dependency_keys = job.dependencies
self._job_keys_as_scheduled.append(key)
if key in self._jobs:
raise JobExistsError(key)
self._jobs[key] = job
if len(dependency_keys) == 0:
self._job_keys_with_no_dependencies.append(key)
self._dependencies[key] = dependency_keys
for dependency_key in dependency_keys:
self._dependees[dependency_key].append(key)
def _compute_job_priorities(self, job_list):
"""Walks the dependency graph breadth-first, starting from the most dependent tasks,
and computes the job priority as the sum of the jobs sizes along the critical path."""
job_size = {job.key: job.size for job in job_list}
job_priority = defaultdict(int)
bfs_queue = deque()
for job in job_list:
if len(self._dependees[job.key]) == 0:
job_priority[job.key] = job_size[job.key]
bfs_queue.append(job.key)
satisfied_dependees_count = defaultdict(int)
while len(bfs_queue) > 0:
job_key = bfs_queue.popleft()
for dependency_key in self._dependencies[job_key]:
job_priority[dependency_key] = \
max(job_priority[dependency_key],
job_size[dependency_key] + job_priority[job_key])
satisfied_dependees_count[dependency_key] += 1
if satisfied_dependees_count[dependency_key] == len(self._dependees[dependency_key]):
bfs_queue.append(dependency_key)
return job_priority
def execute(self, pool, log):
"""Runs scheduled work, ensuring all dependencies for each element are done before execution.
:param pool: A WorkerPool to run jobs on
:param log: logger for logging debug information and progress
submits all the work without any dependencies to the worker pool
when a unit of work finishes,
if it is successful
calls success callback
checks for dependees whose dependencies are all successful, and submits them
if it fails
calls failure callback
marks dependees as failed and queues them directly into the finished work queue
when all work is either successful or failed,
cleans up the work pool
if there's an exception on the main thread,
calls failure callback for unfinished work
aborts work pool
re-raises
"""
log.debug(self.format_dependee_graph())
status_table = StatusTable(self._job_keys_as_scheduled,
{key: len(self._jobs[key].dependencies) for key in self._job_keys_as_scheduled})
finished_queue = queue.Queue()
heap = []
jobs_in_flight = ThreadSafeCounter()
def put_jobs_into_heap(job_keys):
for job_key in job_keys:
# minus because jobs with larger priority should go first
heappush(heap, (-self._job_priority[job_key], job_key))
def try_to_submit_jobs_from_heap():
def worker(worker_key, work):
try:
work()
result = (worker_key, SUCCESSFUL, None)
except Exception as e:
result = (worker_key, FAILED, e)
finished_queue.put(result)
jobs_in_flight.decrement()
while len(heap) > 0 and jobs_in_flight.get() < pool.num_workers:
priority, job_key = heappop(heap)
jobs_in_flight.increment()
status_table.mark_queued(job_key)
pool.submit_async_work(Work(worker, [(job_key, (self._jobs[job_key]))]))
def submit_jobs(job_keys):
put_jobs_into_heap(job_keys)
try_to_submit_jobs_from_heap()
try:
submit_jobs(self._job_keys_with_no_dependencies)
while not status_table.are_all_done():
try:
finished_key, result_status, value = finished_queue.get(timeout=10)
except queue.Empty:
log.debug("Waiting on \n {}\n".format("\n ".join(
"{}: {}".format(key, state) for key, state in status_table.unfinished_items())))
try_to_submit_jobs_from_heap()
continue
finished_job = self._jobs[finished_key]
direct_dependees = self._dependees[finished_key]
status_table.mark_as(result_status, finished_key)
# Queue downstream tasks.
if result_status is SUCCESSFUL:
try:
finished_job.run_success_callback()
except Exception as e:
log.debug(traceback.format_exc())
raise ExecutionFailure("Error in on_success for {}".format(finished_key), e)
ready_dependees = []
for dependee in direct_dependees:
status_table.mark_one_successful_dependency(dependee)
if status_table.is_ready_to_submit(dependee):
ready_dependees.append(dependee)
submit_jobs(ready_dependees)
else: # Failed or canceled.
try:
finished_job.run_failure_callback()
except Exception as e:
log.debug(traceback.format_exc())
raise ExecutionFailure("Error in on_failure for {}".format(finished_key), e)
# Propagate failures downstream.
for dependee in direct_dependees:
if status_table.is_unstarted(dependee):
status_table.mark_queued(dependee)
finished_queue.put((dependee, CANCELED, None))
# Log success or failure for this job.
if result_status is FAILED:
log.error("{} failed: {}".format(finished_key, value))
else:
log.debug("{} finished with status {}".format(finished_key, result_status))
except ExecutionFailure:
raise
except Exception as e:
# Call failure callbacks for jobs that are unfinished.
for key, state in status_table.unfinished_items():
self._jobs[key].run_failure_callback()
log.debug(traceback.format_exc())
raise ExecutionFailure("Error running job", e)
if status_table.has_failures():
raise ExecutionFailure("Failed jobs: {}".format(', '.join(status_table.failed_keys())))
|
apache-2.0
|
rasertux/importacsv
|
vendor/mysql/connector/django/features.py
|
22
|
4331
|
# MySQL Connector/Python - MySQL driver written in Python.
# New file added for Django 1.8
import django
if django.VERSION >= (1, 8):
from django.db.backends.base.features import BaseDatabaseFeatures
else:
from django.db.backends import BaseDatabaseFeatures
from django.utils.functional import cached_property
from django.utils import six
try:
import pytz
HAVE_PYTZ = True
except ImportError:
HAVE_PYTZ = False
class DatabaseFeatures(BaseDatabaseFeatures):
"""Features specific to MySQL
Microsecond precision is supported since MySQL 5.6.3 and turned on
by default if this MySQL version is used.
"""
empty_fetchmany_value = []
update_can_self_select = False
allows_group_by_pk = True
related_fields_match_type = True
allow_sliced_subqueries = False
has_bulk_insert = True
has_select_for_update = True
has_select_for_update_nowait = False
supports_forward_references = False
supports_regex_backreferencing = False
supports_date_lookup_using_string = False
can_introspect_autofield = True
can_introspect_binary_field = False
can_introspect_small_integer_field = True
supports_timezones = False
requires_explicit_null_ordering_when_grouping = True
allows_auto_pk_0 = False
allows_primary_key_0 = False
uses_savepoints = True
atomic_transactions = False
supports_column_check_constraints = False
if django.VERSION < (1, 8):
supports_long_model_names = False
supports_binary_field = six.PY2
can_introspect_boolean_field = False
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
@cached_property
def supports_microsecond_precision(self):
if self.connection.mysql_version >= (5, 6, 3):
return True
return False
@cached_property
def mysql_storage_engine(self):
"""Get default storage engine of MySQL
This method creates a table without ENGINE table option and inspects
which engine was used.
Used by Django tests.
"""
tblname = 'INTROSPECT_TEST'
droptable = 'DROP TABLE IF EXISTS {table}'.format(table=tblname)
with self.connection.cursor() as cursor:
cursor.execute(droptable)
cursor.execute('CREATE TABLE {table} (X INT)'.format(table=tblname))
if self.connection.mysql_version >= (5, 0, 0):
cursor.execute(
"SELECT ENGINE FROM INFORMATION_SCHEMA.TABLES "
"WHERE TABLE_SCHEMA = %s AND TABLE_NAME = %s",
(self.connection.settings_dict['NAME'], tblname))
engine = cursor.fetchone()[0]
else:
# Very old MySQL servers..
cursor.execute("SHOW TABLE STATUS WHERE Name='{table}'".format(
table=tblname))
engine = cursor.fetchone()[1]
cursor.execute(droptable)
self._cached_storage_engine = engine
return engine
@cached_property
def _disabled_supports_transactions(self):
return self.mysql_storage_engine == 'InnoDB'
@cached_property
def can_introspect_foreign_keys(self):
"""Confirm support for introspected foreign keys
Only the InnoDB storage engine supports Foreigen Key (not taking
into account MySQL Cluster here).
"""
return self.mysql_storage_engine == 'InnoDB'
@cached_property
def has_zoneinfo_database(self):
"""Tests if the time zone definitions are installed
MySQL accepts full time zones names (eg. Africa/Nairobi) but rejects
abbreviations (eg. EAT). When pytz isn't installed and the current
time zone is LocalTimezone (the only sensible value in this context),
the current time zone name will be an abbreviation. As a consequence,
MySQL cannot perform time zone conversions reliably.
"""
# Django 1.6
if not HAVE_PYTZ:
return False
with self.connection.cursor() as cursor:
cursor.execute("SELECT 1 FROM mysql.time_zone LIMIT 1")
return cursor.fetchall() != []
def introspected_boolean_field_type(self, *args, **kwargs):
# New in Django 1.8
return 'IntegerField'
|
gpl-3.0
|
mustafat/odoo-1
|
addons/account/report/account_balance.py
|
198
|
5905
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
from common_report_header import common_report_header
class account_balance(report_sxw.rml_parse, common_report_header):
_name = 'report.account.account.balance'
def __init__(self, cr, uid, name, context=None):
super(account_balance, self).__init__(cr, uid, name, context=context)
self.sum_debit = 0.00
self.sum_credit = 0.00
self.date_lst = []
self.date_lst_string = ''
self.result_acc = []
self.localcontext.update({
'time': time,
'lines': self.lines,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'get_fiscalyear':self._get_fiscalyear,
'get_filter': self._get_filter,
'get_start_period': self.get_start_period,
'get_end_period': self.get_end_period ,
'get_account': self._get_account,
'get_journal': self._get_journal,
'get_start_date':self._get_start_date,
'get_end_date':self._get_end_date,
'get_target_move': self._get_target_move,
})
self.context = context
def set_context(self, objects, data, ids, report_type=None):
new_ids = ids
if (data['model'] == 'ir.ui.menu'):
new_ids = 'chart_account_id' in data['form'] and [data['form']['chart_account_id']] or []
objects = self.pool.get('account.account').browse(self.cr, self.uid, new_ids)
return super(account_balance, self).set_context(objects, data, new_ids, report_type=report_type)
def lines(self, form, ids=None, done=None):
def _process_child(accounts, disp_acc, parent):
account_rec = [acct for acct in accounts if acct['id']==parent][0]
currency_obj = self.pool.get('res.currency')
acc_id = self.pool.get('account.account').browse(self.cr, self.uid, account_rec['id'])
currency = acc_id.currency_id and acc_id.currency_id or acc_id.company_id.currency_id
res = {
'id': account_rec['id'],
'type': account_rec['type'],
'code': account_rec['code'],
'name': account_rec['name'],
'level': account_rec['level'],
'debit': account_rec['debit'],
'credit': account_rec['credit'],
'balance': account_rec['balance'],
'parent_id': account_rec['parent_id'],
'bal_type': '',
}
self.sum_debit += account_rec['debit']
self.sum_credit += account_rec['credit']
if disp_acc == 'movement':
if not currency_obj.is_zero(self.cr, self.uid, currency, res['credit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['debit']) or not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
self.result_acc.append(res)
elif disp_acc == 'not_zero':
if not currency_obj.is_zero(self.cr, self.uid, currency, res['balance']):
self.result_acc.append(res)
else:
self.result_acc.append(res)
if account_rec['child_id']:
for child in account_rec['child_id']:
_process_child(accounts,disp_acc,child)
obj_account = self.pool.get('account.account')
if not ids:
ids = self.ids
if not ids:
return []
if not done:
done={}
ctx = self.context.copy()
ctx['fiscalyear'] = form['fiscalyear_id']
if form['filter'] == 'filter_period':
ctx['period_from'] = form['period_from']
ctx['period_to'] = form['period_to']
elif form['filter'] == 'filter_date':
ctx['date_from'] = form['date_from']
ctx['date_to'] = form['date_to']
ctx['state'] = form['target_move']
parents = ids
child_ids = obj_account._get_children_and_consol(self.cr, self.uid, ids, ctx)
if child_ids:
ids = child_ids
accounts = obj_account.read(self.cr, self.uid, ids, ['type','code','name','debit','credit','balance','parent_id','level','child_id'], ctx)
for parent in parents:
if parent in done:
continue
done[parent] = 1
_process_child(accounts,form['display_account'],parent)
return self.result_acc
class report_trialbalance(osv.AbstractModel):
_name = 'report.account.report_trialbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_trialbalance'
_wrapped_report_class = account_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
ioanpocol/superdesk-core
|
superdesk/storage/amazon_media_storage.py
|
2
|
12253
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015, 2016 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
"""Amazon media storage module."""
from io import BytesIO
from os.path import splitext
from urllib.parse import urlparse
import json
import logging
import time
import boto3
from botocore.client import Config
from eve.io.media import MediaStorage
from mimetypes import guess_extension
from superdesk.media.media_operations import download_file_from_url
from superdesk.utc import query_datetime
logger = logging.getLogger(__name__)
MAX_KEYS = 1000
class AmazonObjectWrapper(BytesIO):
def __init__(self, s3_object, name, metadata):
super().__init__()
s3_body = s3_object['Body']
blocksize = 65636
buf = s3_body.read(amt=blocksize)
while len(buf) > 0:
self.write(buf)
buf = s3_body.read(amt=blocksize)
self.seek(0)
self.content_type = s3_object['ContentType']
self.length = int(s3_object['ContentLength'])
self.name = name
self.filename = name
self.metadata = metadata
self.upload_date = s3_object['LastModified']
self.md5 = s3_object['ETag'][1:-1]
self._id = name
def _guess_extension(content_type):
ext = str(guess_extension(content_type))
if ext in ['.jpe', '.jpeg']:
return '.jpg'
if 'mp3' in content_type or 'audio/mpeg' in content_type:
return '.mp3'
if 'flac' in content_type:
return '.flac'
return ext if ext != 'None' else ''
class AmazonMediaStorage(MediaStorage):
def __init__(self, app=None):
super().__init__(app)
self.client = boto3.client(
's3',
aws_access_key_id=self.app.config['AMAZON_ACCESS_KEY_ID'],
aws_secret_access_key=self.app.config['AMAZON_SECRET_ACCESS_KEY'],
region_name=self.app.config.get('AMAZON_REGION'),
config=Config(signature_version='s3v4'),
)
self.user_metadata_header = 'x-amz-meta-'
def url_for_media(self, media_id, content_type=None):
return self.app.upload_url(str(media_id))
def url_for_download(self, media_id, content_type=None):
return self.app.download_url(str(media_id))
def media_id(self, filename, content_type=None, version=True):
"""Get the ``media_id`` path for the given ``filename``.
if filename doesn't have an extension one is guessed,
and additional *version* option to have automatic version or not to have,
or to send a `string` one.
"""
path = urlparse(filename).path
file_extension = splitext(path)[1]
extension = ''
if not file_extension:
extension = str(_guess_extension(content_type)) if content_type else ''
if version is True:
# automatic version is set on 15mins granularity.
mins_granularity = int(int(time.strftime('%M')) / 4) * 4
version = '%s%s/' % (time.strftime('%Y%m%d%H%m'), mins_granularity)
elif version is False:
version = ''
else:
version = '%s/' % version.strip('/')
return '%s%s%s' % (version, filename, extension)
def fetch_rendition(self, rendition):
stream, name, mime = download_file_from_url(rendition.get('href'))
return stream
def call(self, method, **kw):
kw.setdefault('Bucket', self.app.config['AMAZON_CONTAINER_NAME'])
if 'Key' in kw:
kw['Key'] = self.get_key(kw['Key'])
return getattr(self.client, method)(**kw)
def get_key(self, key):
subfolder = self.app.config.get('AMAZON_S3_SUBFOLDER', 'false')
if key and subfolder and subfolder.lower() != 'false':
key = '%s/%s' % (subfolder.strip('/'), key)
return key
def get(self, id_or_filename, resource=None):
"""Open the file given by name or unique id.
Note that although the returned file is guaranteed to be a File object,
it might actually be some subclass. Returns None if no file was found.
"""
id_or_filename = str(id_or_filename)
try:
obj = self.call('get_object', Key=id_or_filename)
if obj:
metadata = self.extract_metadata_from_headers(obj['Metadata'])
return AmazonObjectWrapper(obj, id_or_filename, metadata)
except Exception:
return None
return None
def get_all_keys(self):
"""Return the list of all keys from the bucket."""
all_keys = []
try:
for objects in self._get_all_keys_in_batches():
all_keys.extend(objects)
except Exception as ex:
logger.exception(ex)
finally:
return all_keys
def _get_all_keys_in_batches(self):
"""Return the list of all keys from the bucket in batches."""
NextMarker = ''
subfolder = self.app.config.get('AMAZON_S3_SUBFOLDER') or ''
while True:
objects = self.call('list_objects', Marker=NextMarker, MaxKeys=MAX_KEYS, Prefix=subfolder)
if not objects or len(objects.get('Contents', [])) == 0:
return
keys = [obj['Key'] for obj in objects.get('Contents', [])]
NextMarker = keys[-1]
yield keys
def extract_metadata_from_headers(self, request_headers):
headers = {}
for key, value in request_headers.items():
if self.user_metadata_header in key:
new_key = key.split(self.user_metadata_header)[1]
if(value):
try:
headers[new_key] = json.loads(value)
except Exception as ex:
logger.exception(ex)
return headers
def put(self, content, filename=None, content_type=None, resource=None, metadata=None, _id=None, version=True,
folder=None):
"""Save a new file using the storage system, preferably with the name specified.
If there already exists a file with this name name, the
storage system may modify the filename as necessary to get a unique
name. Depending on the storage system, a unique id or the actual name
of the stored file will be returned. The content type argument is used
to appropriately identify the file when it is retrieved.
:param ByteIO content: Data to store in the file object
:param str filename: Filename used to store the object
:param str content_type: Content type of the data to be stored
:param resource: Superdesk resource, i.e. 'upload' or 'download'
:param metadata: Not currently used with Amazon S3 storage
:param str _id: ID to be used as the key in the bucket
:param version: If True the timestamp will be prepended to the key else a string can be used to prepend the key
:param str folder: The folder to store the object in
:return str: The ID that was generated for this object
"""
# XXX: we don't use metadata here as Amazon S3 as a limit of 2048 bytes (keys + values)
# and they are anyway stored in MongoDB (and still part of the file). See issue SD-4231
logger.debug('Going to save file file=%s media=%s ' % (filename, _id))
if not _id:
_id = self.media_id(filename, content_type=content_type, version=version)
if folder:
_id = '%s/%s' % (folder.rstrip('/'), _id)
found = self._check_exists(_id)
if found:
return _id
kwargs = {}
acl = self.app.config['AMAZON_OBJECT_ACL']
if acl:
# not sure it's really needed here,
# probably better to turn on/off public-read on the bucket instead
kwargs['ACL'] = acl
try:
self.call(
'put_object',
Key=_id,
Body=content,
ContentType=content_type,
**kwargs
)
return _id
except Exception as ex:
logger.exception(ex)
raise
def delete(self, id_or_filename, resource=None):
id_or_filename = str(id_or_filename)
del_res = self.call('delete_object', Key=id_or_filename)
logger.debug('Amazon S3 file deleted %s with status' % id_or_filename, del_res)
def delete_objects(self, ids):
"""Delete the objects with given list of ids."""
try:
delete_parameters = {'Objects': [{'Key': id} for id in ids], 'Quiet': True}
response = self.call('delete_objects', Delete=delete_parameters)
if len(response.get('Errors', [])):
errors = ','.join(['{}:{}'.format(error['Key'], error['Message']) for error in response['Errors']])
logger.error('Files couldn\'t be deleted: {}'.format(errors))
return False, errors
return True, None
except Exception as ex:
logger.exception(ex)
raise
def exists(self, id_or_filename, resource=None):
"""Test if given name or unique id already exists in storage system."""
id_or_filename = str(id_or_filename)
found = self._check_exists(id_or_filename)
return found
def _check_exists(self, id_or_filename):
try:
self.call('head_object', Key=id_or_filename)
return True
except Exception:
# File not found
return False
def remove_unreferenced_files(self, existing_files):
"""Get the files from S3 and compare against existing and delete the orphans."""
# TODO: Add AMAZON_S3_SUBFOLDER support ref: SDESK-1119
bucket_files = self.get_all_keys()
orphan_files = list(set(bucket_files) - existing_files)
print('There are {} orphan files...'.format(len(orphan_files)))
for i in range(0, len(orphan_files), MAX_KEYS):
batch = orphan_files[i:i + MAX_KEYS]
print('Cleaning %d orphan files...' % len(batch), end='')
deleted, errors = self.delete_objects(batch)
if deleted:
print('done.')
else:
print('failed to clean orphans: {}'.format(errors))
else:
print('There\'s nothing to clean.')
def find(self, folder=None, upload_date=None, resource=None):
"""Search for files in the S3 bucket
Searches for files in the S3 bucket using a combination of folder name and/or upload date
comparisons. Also uses the `superdesk.utc.query_datetime` method to compare the upload_date provided
and the upload_date of the file.
:param str folder: Folder name
:param dict upload_date: Upload date with comparison operator (i.e. $lt, $lte, $gt or $gte)
:param resource: The resource type to use
:return list: List of files that matched the provided parameters
"""
files = []
next_marker = ''
folder = '{}/'.format(folder) if folder else None
while True:
result = self.call(
'list_objects',
Marker=next_marker,
MaxKeys=MAX_KEYS,
Prefix=folder
)
if not result or len(result.get('Contents', [])) <= 0:
break
objects = result.get('Contents', [])
for file in objects:
if upload_date is not None and not query_datetime(file.get('LastModified'), upload_date):
continue
files.append({
'_id': file.get('Key'),
'filename': file.get('Key'),
'upload_date': file.get('LastModified'),
'size': file.get('Size'),
'_etag': file.get('ETag')
})
next_marker = objects[-1]['Key']
return files
def getFilename(self, media_id):
return media_id
|
agpl-3.0
|
direvus/ansible
|
lib/ansible/modules/network/aci/aci_interface_policy_fc.py
|
10
|
6142
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_fc
short_description: Manage Fibre Channel interface policies (fc:IfPol)
description:
- Manage ACI Fiber Channel interface policies on Cisco ACI fabrics.
author:
- Dag Wieers (@dagwieers)
version_added: '2.4'
notes:
- More information about the internal APIC class B(fc:IfPol) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
options:
fc_policy:
description:
- The name of the Fiber Channel interface policy.
required: yes
aliases: [ name ]
description:
description:
- The description of the Fiber Channel interface policy.
aliases: [ descr ]
port_mode:
description:
- The Port Mode to use.
- The APIC defaults to C(f) when unset during creation.
choices: [ f, np ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- aci_interface_policy_fc:
host: '{{ hostname }}'
username: '{{ username }}'
password: '{{ password }}'
fc_policy: '{{ fc_policy }}'
port_mode: '{{ port_mode }}'
description: '{{ description }}'
state: present
delegate_to: localhost
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
fc_policy=dict(type='str', required=False, aliases=['name']), # Not required for querying all objects
description=dict(type='str', aliases=['descr']),
port_mode=dict(type='str', choices=['f', 'np']), # No default provided on purpose
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['fc_policy']],
['state', 'present', ['fc_policy']],
],
)
fc_policy = module.params['fc_policy']
port_mode = module.params['port_mode']
description = module.params['description']
state = module.params['state']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='fcIfPol',
aci_rn='infra/fcIfPol-{0}'.format(fc_policy),
module_object=fc_policy,
target_filter={'name': fc_policy},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fcIfPol',
class_config=dict(
name=fc_policy,
descr=description,
portMode=port_mode,
),
)
aci.get_diff(aci_class='fcIfPol')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
|
gpl-3.0
|
zsoltdudas/lis-tempest
|
tempest/tests/test_hacking.py
|
3
|
7788
|
# Copyright 2014 Matthew Treinish
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.hacking import checks
from tempest.tests import base
class HackingTestCase(base.TestCase):
"""Test class for hacking rule
This class tests the hacking checks in tempest.hacking.checks by passing
strings to the check methods like the pep8/flake8 parser would. The parser
loops over each line in the file and then passes the parameters to the
check method. The parameter names in the check method dictate what type of
object is passed to the check method. The parameter types are::
logical_line: A processed line with the following modifications:
- Multi-line statements converted to a single line.
- Stripped left and right.
- Contents of strings replaced with "xxx" of same length.
- Comments removed.
physical_line: Raw line of text from the input file.
lines: a list of the raw lines from the input file
tokens: the tokens that contribute to this logical line
line_number: line number in the input file
total_lines: number of lines in the input file
blank_lines: blank lines before this one
indent_char: indentation character in this file (" " or "\t")
indent_level: indentation (with tabs expanded to multiples of 8)
previous_indent_level: indentation on previous line
previous_logical: previous logical line
filename: Path of the file being run through pep8
When running a test on a check method the return will be False/None if
there is no violation in the sample input. If there is an error a tuple is
returned with a position in the line, and a message. So to check the result
just assertTrue if the check is expected to fail and assertFalse if it
should pass.
"""
def test_no_setup_teardown_class_for_tests(self):
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/tests/fake_test.py'))
self.assertIsNone(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls): # noqa", './tempest/tests/fake_test.py'))
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/api/fake_test.py'))
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/scenario/fake_test.py'))
self.assertFalse(checks.no_setup_teardown_class_for_tests(
" def setUpClass(cls):", './tempest/test.py'))
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def tearDownClass(cls):", './tempest/tests/fake_test.py'))
self.assertIsNone(checks.no_setup_teardown_class_for_tests(
" def tearDownClass(cls): # noqa", './tempest/tests/fake_test.py'))
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def tearDownClass(cls):", './tempest/api/fake_test.py'))
self.assertTrue(checks.no_setup_teardown_class_for_tests(
" def tearDownClass(cls):", './tempest/scenario/fake_test.py'))
self.assertFalse(checks.no_setup_teardown_class_for_tests(
" def tearDownClass(cls):", './tempest/test.py'))
def test_import_no_clients_in_api_and_scenario_tests(self):
for client in checks.PYTHON_CLIENTS:
string = "import " + client + "client"
self.assertTrue(
checks.import_no_clients_in_api_and_scenario_tests(
string, './tempest/api/fake_test.py'))
self.assertTrue(
checks.import_no_clients_in_api_and_scenario_tests(
string, './tempest/scenario/fake_test.py'))
self.assertFalse(
checks.import_no_clients_in_api_and_scenario_tests(
string, './tempest/test.py'))
def test_scenario_tests_need_service_tags(self):
self.assertFalse(checks.scenario_tests_need_service_tags(
'def test_fake:', './tempest/scenario/test_fake.py',
"@test.services('compute')"))
self.assertFalse(checks.scenario_tests_need_service_tags(
'def test_fake_test:', './tempest/api/compute/test_fake.py',
"@test.services('image')"))
self.assertFalse(checks.scenario_tests_need_service_tags(
'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
"@test.services('compute')"))
self.assertTrue(checks.scenario_tests_need_service_tags(
'def test_fake_test:', './tempest/scenario/test_fake.py',
'\n'))
self.assertTrue(checks.scenario_tests_need_service_tags(
'def test_fake:', './tempest/scenario/orchestration/test_fake.py',
"\n"))
def test_no_vi_headers(self):
# NOTE(mtreinish) The lines parameter is used only for finding the
# line location in the file. So these tests just pass a list of an
# arbitrary length to use for verifying the check function.
self.assertTrue(checks.no_vi_headers(
'# vim: tabstop=4 shiftwidth=4 softtabstop=4', 1, range(250)))
self.assertTrue(checks.no_vi_headers(
'# vim: tabstop=4 shiftwidth=4 softtabstop=4', 249, range(250)))
self.assertFalse(checks.no_vi_headers(
'# vim: tabstop=4 shiftwidth=4 softtabstop=4', 149, range(250)))
def test_service_tags_not_in_module_path(self):
self.assertTrue(checks.service_tags_not_in_module_path(
"@test.services('compute')", './tempest/api/compute/fake_test.py'))
self.assertFalse(checks.service_tags_not_in_module_path(
"@test.services('compute')",
'./tempest/scenario/compute/fake_test.py'))
self.assertFalse(checks.service_tags_not_in_module_path(
"@test.services('compute')", './tempest/api/image/fake_test.py'))
def test_no_hyphen_at_end_of_rand_name(self):
self.assertIsNone(checks.no_hyphen_at_end_of_rand_name(
'data_utils.rand_name("fake-resource")', './tempest/test_foo.py'))
self.assertEqual(2, len(list(checks.no_hyphen_at_end_of_rand_name(
'data_utils.rand_name("fake-resource-")', './tempest/test_foo.py')
)))
def test_no_mutable_default_args(self):
self.assertEqual(1, len(list(checks.no_mutable_default_args(
" def function1(para={}):"))))
self.assertEqual(1, len(list(checks.no_mutable_default_args(
"def function2(para1, para2, para3=[])"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined = []"))))
self.assertEqual(0, len(list(checks.no_mutable_default_args(
"defined, undefined = [], {}"))))
def test_no_testtools_skip_decorator(self):
self.assertEqual(1, len(list(checks.no_testtools_skip_decorator(
" @testtools.skip('Bug xxx')"))))
self.assertEqual(0, len(list(checks.no_testtools_skip_decorator(
" @testtools.skipUnless(CONF.something, 'msg')"))))
self.assertEqual(0, len(list(checks.no_testtools_skip_decorator(
" @testtools.skipIf(CONF.something, 'msg')"))))
|
apache-2.0
|
FlaPer87/django-nonrel
|
tests/regressiontests/makemessages/extraction.py
|
14
|
3452
|
import os
import re
import shutil
from django.test import TestCase
from django.core import management
LOCALE='de'
class ExtractorTests(TestCase):
PO_FILE='locale/%s/LC_MESSAGES/django.po' % LOCALE
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(__file__))
def _rmrf(self, dname):
if os.path.commonprefix([self.test_dir, os.path.abspath(dname)]) != self.test_dir:
return
shutil.rmtree(dname)
def tearDown(self):
os.chdir(self.test_dir)
try:
self._rmrf('locale/%s' % LOCALE)
except OSError:
pass
os.chdir(self._cwd)
def assertMsgId(self, msgid, s):
return self.assert_(re.search('^msgid "%s"' % msgid, s, re.MULTILINE))
def assertNotMsgId(self, msgid, s):
return self.assert_(not re.search('^msgid "%s"' % msgid, s, re.MULTILINE))
class JavascriptExtractorTests(ExtractorTests):
PO_FILE='locale/%s/LC_MESSAGES/djangojs.po' % LOCALE
def test_javascript_literals(self):
os.chdir(self.test_dir)
management.call_command('makemessages', domain='djangojs', locale=LOCALE, verbosity=0)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertMsgId('This one as well.', po_contents)
class IgnoredExtractorTests(ExtractorTests):
def test_ignore_option(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, ignore_patterns=['ignore_dir/*'])
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('This literal should be included.', po_contents)
self.assertNotMsgId('This should be ignored.', po_contents)
class SymlinkExtractorTests(ExtractorTests):
def setUp(self):
self._cwd = os.getcwd()
self.test_dir = os.path.abspath(os.path.dirname(__file__))
self.symlinked_dir = os.path.join(self.test_dir, 'templates_symlinked')
def tearDown(self):
super(SymlinkExtractorTests, self).tearDown()
os.chdir(self.test_dir)
try:
os.remove(self.symlinked_dir)
except OSError:
pass
os.chdir(self._cwd)
def test_symlink(self):
if hasattr(os, 'symlink'):
if os.path.exists(self.symlinked_dir):
self.assert_(os.path.islink(self.symlinked_dir))
else:
os.symlink(os.path.join(self.test_dir, 'templates'), self.symlinked_dir)
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0, symlinks=True)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assertMsgId('This literal should be included.', po_contents)
self.assert_('templates_symlinked/test.html' in po_contents)
class CopyPluralFormsExtractorTests(ExtractorTests):
def test_copy_plural_forms(self):
os.chdir(self.test_dir)
management.call_command('makemessages', locale=LOCALE, verbosity=0)
self.assert_(os.path.exists(self.PO_FILE))
po_contents = open(self.PO_FILE, 'r').read()
self.assert_('Plural-Forms: nplurals=2; plural=(n != 1)' in po_contents)
|
bsd-3-clause
|
JIC-CSB/dtoolcore
|
docs/source/conf.py
|
1
|
5148
|
# -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u"dtoolcore"
copyright = u"2017, Tjelvar Olsson"
author = u"Tjelvar Olsson"
repo_name = u"dtoolcore"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"3.13.0"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Set the readthedocs theme.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
print('using readthedocs theme...')
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{}doc'.format(repo_name)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{}.tex'.format(repo_name),
u'{} Documentation'.format(repo_name),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
author, repo_name, u'Core API for managing (scientific) data',
'Miscellaneous'),
]
|
mit
|
lucasosouza/berkeleyAI
|
tutorial/testParser.py
|
49
|
2990
|
# testParser.py
# -------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
import re
import sys
class TestParser(object):
def __init__(self, path):
# save the path to the test file
self.path = path
def removeComments(self, rawlines):
# remove any portion of a line following a '#' symbol
fixed_lines = []
for l in rawlines:
idx = l.find('#')
if idx == -1:
fixed_lines.append(l)
else:
fixed_lines.append(l[0:idx])
return '\n'.join(fixed_lines)
def parse(self):
# read in the test case and remove comments
test = {}
with open(self.path) as handle:
raw_lines = handle.read().split('\n')
test_text = self.removeComments(raw_lines)
test['__raw_lines__'] = raw_lines
test['path'] = self.path
test['__emit__'] = []
lines = test_text.split('\n')
i = 0
# read a property in each loop cycle
while(i < len(lines)):
# skip blank lines
if re.match('\A\s*\Z', lines[i]):
test['__emit__'].append(("raw", raw_lines[i]))
i += 1
continue
m = re.match('\A([^"]*?):\s*"([^"]*)"\s*\Z', lines[i])
if m:
test[m.group(1)] = m.group(2)
test['__emit__'].append(("oneline", m.group(1)))
i += 1
continue
m = re.match('\A([^"]*?):\s*"""\s*\Z', lines[i])
if m:
msg = []
i += 1
while(not re.match('\A\s*"""\s*\Z', lines[i])):
msg.append(raw_lines[i])
i += 1
test[m.group(1)] = '\n'.join(msg)
test['__emit__'].append(("multiline", m.group(1)))
i += 1
continue
print 'error parsing test file: %s' % self.path
sys.exit(1)
return test
def emitTestDict(testDict, handle):
for kind, data in testDict['__emit__']:
if kind == "raw":
handle.write(data + "\n")
elif kind == "oneline":
handle.write('%s: "%s"\n' % (data, testDict[data]))
elif kind == "multiline":
handle.write('%s: """\n%s\n"""\n' % (data, testDict[data]))
else:
raise Exception("Bad __emit__")
|
mit
|
wbsavage/shinken
|
shinken/modules/logstore_null.py
|
3
|
2366
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This class store log broks in a black hole
It is one possibility (!) for an exchangeable storage for log broks
"""
from shinken.log import logger
from shinken.basemodule import BaseModule
properties = {
'daemons': ['livestatus'],
'type': 'logstore_null',
'external': False,
'phases': ['running'],
}
# called by the plugin manager
def get_instance(plugin):
logger.info("[Logstore Null] Get an LogStore Null module for plugin %s" % plugin.get_name())
instance = LiveStatusLogStoreNull(plugin)
return instance
class LiveStatusLogStoreNull(BaseModule):
def __init__(self, modconf):
BaseModule.__init__(self, modconf)
self.plugins = []
def load(self, app):
self.app = app
def init(self):
pass
def open(self):
logger.info("[Logstore Null] Open LiveStatusLogStoreNull ok")
def close(self):
pass
def commit(self):
pass
def commit_and_rotate_log_db(self):
pass
def manage_log_brok(self, b):
# log brok successfully stored in the black hole
pass
def add_filter(self, operator, attribute, reference):
pass
def add_filter_and(self, andnum):
pass
def add_filter_or(self, ornum):
pass
def add_filter_not(self):
pass
def get_live_data_log(self):
"""Like get_live_data, but for log objects"""
result = []
return result
|
agpl-3.0
|
aswinpj/Pygments
|
pygments/lexers/_csound_builtins.py
|
23
|
21643
|
# -*- coding: utf-8 -*-
"""
pygments.lexers._csound_builtins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
# Opcodes in Csound 6.05 from
# csound --list-opcodes
# except
# cggoto <http://www.csounds.com/manual/html/cggoto.html>
# cigoto <http://www.csounds.com/manual/html/cigoto.html>
# cingoto (undocumented)
# ckgoto <http://www.csounds.com/manual/html/ckgoto.html>
# cngoto <http://www.csounds.com/manual/html/cngoto.html>
# endin <http://www.csounds.com/manual/html/endin.html
# endop <http://www.csounds.com/manual/html/endop.html
# goto <http://www.csounds.com/manual/html/goto.html>
# igoto <http://www.csounds.com/manual/html/igoto.html>
# instr <http://www.csounds.com/manual/html/instr.html>
# kgoto <http://www.csounds.com/manual/html/kgoto.html>
# loop_ge <http://www.csounds.com/manual/html/loop_ge.html>
# loop_gt <http://www.csounds.com/manual/html/loop_gt.html>
# loop_le <http://www.csounds.com/manual/html/loop_le.html>
# loop_lt <http://www.csounds.com/manual/html/loop_lt.html>
# opcode <http://www.csounds.com/manual/html/opcode.html>
# return <http://www.csounds.com/manual/html/return.html>
# rigoto <http://www.csounds.com/manual/html/rigoto.html>
# tigoto <http://www.csounds.com/manual/html/tigoto.html>
# timout <http://www.csounds.com/manual/html/timout.html>
# which are treated as keywords; the scoreline opcodes
# scoreline <http://www.csounds.com/manual/html/scoreline.html>
# scoreline_i <http://www.csounds.com/manual/html/scoreline_i.html>
# which allow Csound Score highlighting; the pyrun opcodes
# <http://www.csounds.com/manual/html/pyrun.html>
# pylrun
# pylruni
# pylrunt
# pyrun
# pyruni
# pyrunt
# which allow Python highlighting; and the Lua opcodes
# lua_exec <http://www.csounds.com/manual/html/lua_exec.html>
# lua_opdef <http://www.csounds.com/manual/html/lua_opdef.html>
# which allow Lua highlighting.
OPCODES = set((
'ATSadd',
'ATSaddnz',
'ATSbufread',
'ATScross',
'ATSinfo',
'ATSinterpread',
'ATSpartialtap',
'ATSread',
'ATSreadnz',
'ATSsinnoi',
'FLbox',
'FLbutBank',
'FLbutton',
'FLcloseButton',
'FLcolor',
'FLcolor2',
'FLcount',
'FLexecButton',
'FLgetsnap',
'FLgroup',
'FLgroupEnd',
'FLgroup_end',
'FLhide',
'FLhvsBox',
'FLhvsBoxSetValue',
'FLjoy',
'FLkeyIn',
'FLknob',
'FLlabel',
'FLloadsnap',
'FLmouse',
'FLpack',
'FLpackEnd',
'FLpack_end',
'FLpanel',
'FLpanelEnd',
'FLpanel_end',
'FLprintk',
'FLprintk2',
'FLroller',
'FLrun',
'FLsavesnap',
'FLscroll',
'FLscrollEnd',
'FLscroll_end',
'FLsetAlign',
'FLsetBox',
'FLsetColor',
'FLsetColor2',
'FLsetFont',
'FLsetPosition',
'FLsetSize',
'FLsetSnapGroup',
'FLsetText',
'FLsetTextColor',
'FLsetTextSize',
'FLsetTextType',
'FLsetVal',
'FLsetVal_i',
'FLsetVali',
'FLsetsnap',
'FLshow',
'FLslidBnk',
'FLslidBnk2',
'FLslidBnk2Set',
'FLslidBnk2Setk',
'FLslidBnkGetHandle',
'FLslidBnkSet',
'FLslidBnkSetk',
'FLslider',
'FLtabs',
'FLtabsEnd',
'FLtabs_end',
'FLtext',
'FLupdate',
'FLvalue',
'FLvkeybd',
'FLvslidBnk',
'FLvslidBnk2',
'FLxyin',
'MixerClear',
'MixerGetLevel',
'MixerReceive',
'MixerSend',
'MixerSetLevel',
'MixerSetLevel_i',
'OSCinit',
'OSClisten',
'OSCsend',
'a',
'abs',
'active',
'adsr',
'adsyn',
'adsynt',
'adsynt2',
'aftouch',
'alpass',
'alwayson',
'ampdb',
'ampdbfs',
'ampmidi',
'ampmidid',
'areson',
'aresonk',
'array',
'atone',
'atonek',
'atonex',
'babo',
'balance',
'bamboo',
'barmodel',
'bbcutm',
'bbcuts',
'betarand',
'bexprnd',
'bformdec',
'bformdec1',
'bformenc',
'bformenc1',
'binit',
'biquad',
'biquada',
'birnd',
'bqrez',
'buchla',
'butbp',
'butbr',
'buthp',
'butlp',
'butterbp',
'butterbr',
'butterhp',
'butterlp',
'button',
'buzz',
'c2r',
'cabasa',
'cauchy',
'cauchyi',
'ceil',
'cell',
'cent',
'centroid',
'ceps',
#'cggoto',
'chanctrl',
'changed',
'chani',
'chano',
'chebyshevpoly',
'checkbox',
'chn_S',
'chn_a',
'chn_k',
'chnclear',
'chnexport',
'chnget',
'chnmix',
'chnparams',
'chnset',
'chuap',
#'cigoto',
#'cingoto',
#'ckgoto',
'clear',
'clfilt',
'clip',
'clockoff',
'clockon',
'cmplxprod',
#'cngoto',
'comb',
'combinv',
'compilecsd',
'compileorc',
'compilestr',
'compress',
'connect',
'control',
'convle',
'convolve',
'copy2ftab',
'copy2ttab',
'copya2ftab',
'copyf2array',
'cos',
'cosh',
'cosinv',
'cosseg',
'cossegb',
'cossegr',
'cps2pch',
'cpsmidi',
'cpsmidib',
'cpsmidinn',
'cpsoct',
'cpspch',
'cpstmid',
'cpstun',
'cpstuni',
'cpsxpch',
'cpuprc',
'cross2',
'crossfm',
'crossfmi',
'crossfmpm',
'crossfmpmi',
'crosspm',
'crosspmi',
'crunch',
'ctlchn',
'ctrl14',
'ctrl21',
'ctrl7',
'ctrlinit',
'cuserrnd',
'dam',
'date',
'dates',
'db',
'dbamp',
'dbfsamp',
'dcblock',
'dcblock2',
'dconv',
'delay',
'delay1',
'delayk',
'delayr',
'delayw',
'deltap',
'deltap3',
'deltapi',
'deltapn',
'deltapx',
'deltapxw',
'denorm',
'diff',
'diskgrain',
'diskin',
'diskin2',
'dispfft',
'display',
'distort',
'distort1',
'divz',
'doppler',
'downsamp',
'dripwater',
'dumpk',
'dumpk2',
'dumpk3',
'dumpk4',
'duserrnd',
'dust',
'dust2',
#'endin',
#'endop',
'envlpx',
'envlpxr',
'ephasor',
'eqfil',
'evalstr',
'event',
'event_i',
'exciter',
'exitnow',
'exp',
'expcurve',
'expon',
'exprand',
'exprandi',
'expseg',
'expsega',
'expsegb',
'expsegba',
'expsegr',
'fareylen',
'fareyleni',
'faustaudio',
'faustcompile',
'faustctl',
'faustgen',
'fft',
'fftinv',
'ficlose',
'filebit',
'filelen',
'filenchnls',
'filepeak',
'filesr',
'filevalid',
'fillarray',
'filter2',
'fin',
'fini',
'fink',
'fiopen',
'flanger',
'flashtxt',
'flooper',
'flooper2',
'floor',
'fluidAllOut',
'fluidCCi',
'fluidCCk',
'fluidControl',
'fluidEngine',
'fluidLoad',
'fluidNote',
'fluidOut',
'fluidProgramSelect',
'fluidSetInterpMethod',
'fmb3',
'fmbell',
'fmmetal',
'fmpercfl',
'fmrhode',
'fmvoice',
'fmwurlie',
'fof',
'fof2',
'fofilter',
'fog',
'fold',
'follow',
'follow2',
'foscil',
'foscili',
'fout',
'fouti',
'foutir',
'foutk',
'fprintks',
'fprints',
'frac',
'fractalnoise',
'freeverb',
'ftchnls',
'ftconv',
'ftcps',
'ftfree',
'ftgen',
'ftgenonce',
'ftgentmp',
'ftlen',
'ftload',
'ftloadk',
'ftlptim',
'ftmorf',
'ftresize',
'ftresizei',
'ftsave',
'ftsavek',
'ftsr',
'gain',
'gainslider',
'gauss',
'gaussi',
'gausstrig',
'gbuzz',
'genarray',
'genarray_i',
'gendy',
'gendyc',
'gendyx',
'getcfg',
'getcol',
'getrow',
'gogobel',
#'goto',
'grain',
'grain2',
'grain3',
'granule',
'guiro',
'harmon',
'harmon2',
'harmon3',
'harmon4',
'hdf5read',
'hdf5write',
'hilbert',
'hrtfearly',
'hrtfer',
'hrtfmove',
'hrtfmove2',
'hrtfreverb',
'hrtfstat',
'hsboscil',
'hvs1',
'hvs2',
'hvs3',
'i',
'iceps',
#'igoto',
'ihold',
'imagecreate',
'imagefree',
'imagegetpixel',
'imageload',
'imagesave',
'imagesetpixel',
'imagesize',
'in',
'in32',
'inch',
'inh',
'init',
'initc14',
'initc21',
'initc7',
'inleta',
'inletf',
'inletk',
'inletkid',
'inletv',
'ino',
'inq',
'inrg',
'ins',
'insglobal',
'insremot',
#'instr',
'int',
'integ',
'interp',
'invalue',
'inx',
'inz',
'jitter',
'jitter2',
'jspline',
'k',
#'kgoto',
'ktableseg',
'lenarray',
'lentab',
'lfo',
'limit',
'line',
'linen',
'linenr',
'lineto',
'linrand',
'linseg',
'linsegb',
'linsegr',
'locsend',
'locsig',
'log',
'log10',
'log2',
'logbtwo',
'logcurve',
#'loop_ge',
#'loop_gt',
#'loop_le',
#'loop_lt',
'loopseg',
'loopsegp',
'looptseg',
'loopxseg',
'lorenz',
'loscil',
'loscil3',
'loscilx',
'lowpass2',
'lowres',
'lowresx',
'lpf18',
'lpform',
'lpfreson',
'lphasor',
'lpinterp',
'lposcil',
'lposcil3',
'lposcila',
'lposcilsa',
'lposcilsa2',
'lpread',
'lpreson',
'lpshold',
'lpsholdp',
'lpslot',
#'lua_exec',
'lua_ikopcall',
#'lua_opdef',
'mac',
'maca',
'madsr',
'mags',
'mandel',
'mandol',
'maparray',
'maparray_i',
'marimba',
'massign',
'max',
'max_k',
'maxabs',
'maxabsaccum',
'maxaccum',
'maxalloc',
'maxarray',
'maxtab',
'mclock',
'mdelay',
'median',
'mediank',
'metro',
'midglobal',
'midic14',
'midic21',
'midic7',
'midichannelaftertouch',
'midichn',
'midicontrolchange',
'midictrl',
'mididefault',
'midifilestatus',
'midiin',
'midinoteoff',
'midinoteoncps',
'midinoteonkey',
'midinoteonoct',
'midinoteonpch',
'midion',
'midion2',
'midiout',
'midipgm',
'midipitchbend',
'midipolyaftertouch',
'midiprogramchange',
'miditempo',
'midremot',
'min',
'minabs',
'minabsaccum',
'minaccum',
'minarray',
'mincer',
'mintab',
'mirror',
'mode',
'modmatrix',
'monitor',
'moog',
'moogladder',
'moogvcf',
'moogvcf2',
'moscil',
'mp3bitrate',
'mp3in',
'mp3len',
'mp3nchnls',
'mp3sr',
'mpulse',
'mrtmsg',
'multitap',
'mute',
'mxadsr',
'nestedap',
'nlalp',
'nlfilt',
'nlfilt2',
'noise',
'noteoff',
'noteon',
'noteondur',
'noteondur2',
'notnum',
'nreverb',
'nrpn',
'nsamp',
'nstance',
'nstrnum',
'ntrpol',
'octave',
'octcps',
'octmidi',
'octmidib',
'octmidinn',
'octpch',
#'opcode',
'oscbnk',
'oscil',
'oscil1',
'oscil1i',
'oscil3',
'oscili',
'oscilikt',
'osciliktp',
'oscilikts',
'osciln',
'oscils',
'oscilx',
'out',
'out32',
'outc',
'outch',
'outh',
'outiat',
'outic',
'outic14',
'outipat',
'outipb',
'outipc',
'outkat',
'outkc',
'outkc14',
'outkpat',
'outkpb',
'outkpc',
'outleta',
'outletf',
'outletk',
'outletkid',
'outletv',
'outo',
'outq',
'outq1',
'outq2',
'outq3',
'outq4',
'outrg',
'outs',
'outs1',
'outs2',
'outvalue',
'outx',
'outz',
'p',
'pan',
'pan2',
'pareq',
'partials',
'partikkel',
'partikkelget',
'partikkelset',
'partikkelsync',
'passign',
'pcauchy',
'pchbend',
'pchmidi',
'pchmidib',
'pchmidinn',
'pchoct',
'pconvolve',
'pcount',
'pdclip',
'pdhalf',
'pdhalfy',
'peak',
'pgmassign',
'pgmchn',
'phaser1',
'phaser2',
'phasor',
'phasorbnk',
'phs',
'pindex',
'pinker',
'pinkish',
'pitch',
'pitchac',
'pitchamdf',
'planet',
'platerev',
'plltrack',
'pluck',
'poisson',
'pol2rect',
'polyaft',
'polynomial',
'pop',
'pop_f',
'port',
'portk',
'poscil',
'poscil3',
'pow',
'powershape',
'powoftwo',
'prealloc',
'prepiano',
'print',
'print_type',
'printf',
'printf_i',
'printk',
'printk2',
'printks',
'printks2',
'prints',
'product',
'pset',
'ptable',
'ptable3',
'ptablei',
'ptableiw',
'ptablew',
'ptrack',
'push',
'push_f',
'puts',
'pvadd',
'pvbufread',
'pvcross',
'pvinterp',
'pvoc',
'pvread',
'pvs2array',
'pvs2tab',
'pvsadsyn',
'pvsanal',
'pvsarp',
'pvsbandp',
'pvsbandr',
'pvsbin',
'pvsblur',
'pvsbuffer',
'pvsbufread',
'pvsbufread2',
'pvscale',
'pvscent',
'pvsceps',
'pvscross',
'pvsdemix',
'pvsdiskin',
'pvsdisp',
'pvsenvftw',
'pvsfilter',
'pvsfread',
'pvsfreeze',
'pvsfromarray',
'pvsftr',
'pvsftw',
'pvsfwrite',
'pvsgain',
'pvsgendy',
'pvshift',
'pvsifd',
'pvsin',
'pvsinfo',
'pvsinit',
'pvslock',
'pvsmaska',
'pvsmix',
'pvsmooth',
'pvsmorph',
'pvsosc',
'pvsout',
'pvspitch',
'pvstanal',
'pvstencil',
'pvsvoc',
'pvswarp',
'pvsynth',
'pwd',
'pyassign',
'pyassigni',
'pyassignt',
'pycall',
'pycall1',
'pycall1i',
'pycall1t',
'pycall2',
'pycall2i',
'pycall2t',
'pycall3',
'pycall3i',
'pycall3t',
'pycall4',
'pycall4i',
'pycall4t',
'pycall5',
'pycall5i',
'pycall5t',
'pycall6',
'pycall6i',
'pycall6t',
'pycall7',
'pycall7i',
'pycall7t',
'pycall8',
'pycall8i',
'pycall8t',
'pycalli',
'pycalln',
'pycallni',
'pycallt',
'pyeval',
'pyevali',
'pyevalt',
'pyexec',
'pyexeci',
'pyexect',
'pyinit',
'pylassign',
'pylassigni',
'pylassignt',
'pylcall',
'pylcall1',
'pylcall1i',
'pylcall1t',
'pylcall2',
'pylcall2i',
'pylcall2t',
'pylcall3',
'pylcall3i',
'pylcall3t',
'pylcall4',
'pylcall4i',
'pylcall4t',
'pylcall5',
'pylcall5i',
'pylcall5t',
'pylcall6',
'pylcall6i',
'pylcall6t',
'pylcall7',
'pylcall7i',
'pylcall7t',
'pylcall8',
'pylcall8i',
'pylcall8t',
'pylcalli',
'pylcalln',
'pylcallni',
'pylcallt',
'pyleval',
'pylevali',
'pylevalt',
'pylexec',
'pylexeci',
'pylexect',
#'pylrun',
#'pylruni',
#'pylrunt',
#'pyrun',
#'pyruni',
#'pyrunt',
'qinf',
'qnan',
'r2c',
'rand',
'randh',
'randi',
'random',
'randomh',
'randomi',
'rbjeq',
'readclock',
'readf',
'readfi',
'readk',
'readk2',
'readk3',
'readk4',
'readks',
'readscore',
'readscratch',
'rect2pol',
'reinit',
'release',
'remoteport',
'remove',
'repluck',
'reson',
'resonk',
'resonr',
'resonx',
'resonxk',
'resony',
'resonz',
'resyn',
#'return',
'reverb',
'reverb2',
'reverbsc',
'rewindscore',
'rezzy',
'rfft',
'rifft',
#'rigoto',
'rireturn',
'rms',
'rnd',
'rnd31',
'round',
'rspline',
'rtclock',
's16b14',
's32b14',
'samphold',
'sandpaper',
'scale',
'scalearray',
'scalet',
'scanhammer',
'scans',
'scantable',
'scanu',
'schedkwhen',
'schedkwhennamed',
'schedule',
'schedwhen',
#'scoreline',
#'scoreline_i',
'seed',
'sekere',
'semitone',
'sense',
'sensekey',
'seqtime',
'seqtime2',
'serialBegin',
'serialEnd',
'serialFlush',
'serialPrint',
'serialRead',
'serialWrite',
'serialWrite_i',
'setcol',
'setctrl',
'setksmps',
'setrow',
'setscorepos',
'sfilist',
'sfinstr',
'sfinstr3',
'sfinstr3m',
'sfinstrm',
'sfload',
'sflooper',
'sfpassign',
'sfplay',
'sfplay3',
'sfplay3m',
'sfplaym',
'sfplist',
'sfpreset',
'shaker',
'shiftin',
'shiftout',
'signalflowgraph',
'signum',
'sin',
'sinh',
'sininv',
'sinsyn',
'sleighbells',
'slicearray',
'slider16',
'slider16f',
'slider16table',
'slider16tablef',
'slider32',
'slider32f',
'slider32table',
'slider32tablef',
'slider64',
'slider64f',
'slider64table',
'slider64tablef',
'slider8',
'slider8f',
'slider8table',
'slider8tablef',
'sliderKawai',
'sndload',
'sndloop',
'sndwarp',
'sndwarpst',
'sockrecv',
'sockrecvs',
'socksend',
'socksends',
'soundin',
'soundout',
'soundouts',
'space',
'spat3d',
'spat3di',
'spat3dt',
'spdist',
'specaddm',
'specdiff',
'specdisp',
'specfilt',
'spechist',
'specptrk',
'specscal',
'specsum',
'spectrum',
'splitrig',
'sprintf',
'sprintfk',
'spsend',
'sqrt',
'stack',
'statevar',
'stix',
'strcat',
'strcatk',
'strchar',
'strchark',
'strcmp',
'strcmpk',
'strcpy',
'strcpyk',
'strecv',
'streson',
'strfromurl',
'strget',
'strindex',
'strindexk',
'strlen',
'strlenk',
'strlower',
'strlowerk',
'strrindex',
'strrindexk',
'strset',
'strsub',
'strsubk',
'strtod',
'strtodk',
'strtol',
'strtolk',
'strupper',
'strupperk',
'stsend',
'subinstr',
'subinstrinit',
'sum',
'sumarray',
'sumtab',
'svfilter',
'syncgrain',
'syncloop',
'syncphasor',
'system',
'system_i',
'tab',
'tab2pvs',
'tab_i',
'tabgen',
'table',
'table3',
'table3kt',
'tablecopy',
'tablefilter',
'tablefilteri',
'tablegpw',
'tablei',
'tableicopy',
'tableigpw',
'tableikt',
'tableimix',
'tableiw',
'tablekt',
'tablemix',
'tableng',
'tablera',
'tableseg',
'tableshuffle',
'tableshufflei',
'tablew',
'tablewa',
'tablewkt',
'tablexkt',
'tablexseg',
'tabmap',
'tabmap_i',
'tabmorph',
'tabmorpha',
'tabmorphak',
'tabmorphi',
'tabplay',
'tabrec',
'tabslice',
'tabsum',
'tabw',
'tabw_i',
'tambourine',
'tan',
'tanh',
'taninv',
'taninv2',
'tb0',
'tb0_init',
'tb1',
'tb10',
'tb10_init',
'tb11',
'tb11_init',
'tb12',
'tb12_init',
'tb13',
'tb13_init',
'tb14',
'tb14_init',
'tb15',
'tb15_init',
'tb1_init',
'tb2',
'tb2_init',
'tb3',
'tb3_init',
'tb4',
'tb4_init',
'tb5',
'tb5_init',
'tb6',
'tb6_init',
'tb7',
'tb7_init',
'tb8',
'tb8_init',
'tb9',
'tb9_init',
'tbvcf',
'tempest',
'tempo',
'temposcal',
'tempoval',
#'tigoto',
'timedseq',
'timeinstk',
'timeinsts',
'timek',
'times',
#'timout',
'tival',
'tlineto',
'tone',
'tonek',
'tonex',
'tradsyn',
'trandom',
'transeg',
'transegb',
'transegr',
'trcross',
'trfilter',
'trhighest',
'trigger',
'trigseq',
'trirand',
'trlowest',
'trmix',
'trscale',
'trshift',
'trsplit',
'turnoff',
'turnoff2',
'turnon',
'unirand',
'unwrap',
'upsamp',
'urd',
'vactrol',
'vadd',
'vadd_i',
'vaddv',
'vaddv_i',
'vaget',
'valpass',
'vaset',
'vbap',
'vbap16',
'vbap4',
'vbap4move',
'vbap8',
'vbap8move',
'vbapg',
'vbapgmove',
'vbaplsinit',
'vbapmove',
'vbapz',
'vbapzmove',
'vcella',
'vco',
'vco2',
'vco2ft',
'vco2ift',
'vco2init',
'vcomb',
'vcopy',
'vcopy_i',
'vdel_k',
'vdelay',
'vdelay3',
'vdelayk',
'vdelayx',
'vdelayxq',
'vdelayxs',
'vdelayxw',
'vdelayxwq',
'vdelayxws',
'vdivv',
'vdivv_i',
'vecdelay',
'veloc',
'vexp',
'vexp_i',
'vexpseg',
'vexpv',
'vexpv_i',
'vibes',
'vibr',
'vibrato',
'vincr',
'vlimit',
'vlinseg',
'vlowres',
'vmap',
'vmirror',
'vmult',
'vmult_i',
'vmultv',
'vmultv_i',
'voice',
'vosim',
'vphaseseg',
'vport',
'vpow',
'vpow_i',
'vpowv',
'vpowv_i',
'vpvoc',
'vrandh',
'vrandi',
'vsubv',
'vsubv_i',
'vtaba',
'vtabi',
'vtabk',
'vtable1k',
'vtablea',
'vtablei',
'vtablek',
'vtablewa',
'vtablewi',
'vtablewk',
'vtabwa',
'vtabwi',
'vtabwk',
'vwrap',
'waveset',
'weibull',
'wgbow',
'wgbowedbar',
'wgbrass',
'wgclar',
'wgflute',
'wgpluck',
'wgpluck2',
'wguide1',
'wguide2',
'wiiconnect',
'wiidata',
'wiirange',
'wiisend',
'window',
'wrap',
'writescratch',
'wterrain',
'xadsr',
'xin',
'xout',
'xscanmap',
'xscans',
'xscansmap',
'xscanu',
'xtratim',
'xyin',
'zacl',
'zakinit',
'zamod',
'zar',
'zarg',
'zaw',
'zawm',
'zfilter2',
'zir',
'ziw',
'ziwm',
'zkcl',
'zkmod',
'zkr',
'zkw',
'zkwm'
))
|
bsd-2-clause
|
nomadcube/scikit-learn
|
examples/covariance/plot_sparse_cov.py
|
300
|
5078
|
"""
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too much correlated (limiting the largest coefficient of the
precision matrix) and that there a no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that, the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
|
bsd-3-clause
|
Jimmy-Morzaria/scikit-learn
|
sklearn/utils/tests/test_murmurhash.py
|
261
|
2836
|
# Author: Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import numpy as np
from sklearn.externals.six import b, u
from sklearn.utils.murmurhash import murmurhash3_32
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from nose.tools import assert_equal, assert_true
def test_mmhash3_int():
assert_equal(murmurhash3_32(3), 847579505)
assert_equal(murmurhash3_32(3, seed=0), 847579505)
assert_equal(murmurhash3_32(3, seed=42), -1823081949)
assert_equal(murmurhash3_32(3, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=False), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=False), -1823081949)
assert_equal(murmurhash3_32(3, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=0, positive=True), 847579505)
assert_equal(murmurhash3_32(3, seed=42, positive=True), 2471885347)
def test_mmhash3_int_array():
rng = np.random.RandomState(42)
keys = rng.randint(-5342534, 345345, size=3 * 2 * 1).astype(np.int32)
keys = keys.reshape((3, 2, 1))
for seed in [0, 42]:
expected = np.array([murmurhash3_32(int(k), seed)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed), expected)
for seed in [0, 42]:
expected = np.array([murmurhash3_32(k, seed, positive=True)
for k in keys.flat])
expected = expected.reshape(keys.shape)
assert_array_equal(murmurhash3_32(keys, seed, positive=True),
expected)
def test_mmhash3_bytes():
assert_equal(murmurhash3_32(b('foo'), 0), -156908512)
assert_equal(murmurhash3_32(b('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(b('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(b('foo'), 42, positive=True), 2972666014)
def test_mmhash3_unicode():
assert_equal(murmurhash3_32(u('foo'), 0), -156908512)
assert_equal(murmurhash3_32(u('foo'), 42), -1322301282)
assert_equal(murmurhash3_32(u('foo'), 0, positive=True), 4138058784)
assert_equal(murmurhash3_32(u('foo'), 42, positive=True), 2972666014)
def test_no_collision_on_byte_range():
previous_hashes = set()
for i in range(100):
h = murmurhash3_32(' ' * i, 0)
assert_true(h not in previous_hashes,
"Found collision on growing empty string")
def test_uniform_distribution():
n_bins, n_samples = 10, 100000
bins = np.zeros(n_bins, dtype=np.float)
for i in range(n_samples):
bins[murmurhash3_32(i, positive=True) % n_bins] += 1
means = bins / n_samples
expected = np.ones(n_bins) / n_bins
assert_array_almost_equal(means / expected, np.ones(n_bins), 2)
|
bsd-3-clause
|
romankagan/DDBWorkbench
|
python/lib/Lib/encodings/cp850.py
|
593
|
34361
|
""" Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP850.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp850',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00a7: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00f0, # LATIN SMALL LETTER ETH
0x00d1: 0x00d0, # LATIN CAPITAL LETTER ETH
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x00fe, # LATIN SMALL LETTER THORN
0x00e8: 0x00de, # LATIN CAPITAL LETTER THORN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x2017, # DOUBLE LOW LINE
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xec' # 0x008d -> LATIN SMALL LETTER I WITH GRAVE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\xff' # 0x0098 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u0192' # 0x009f -> LATIN SMALL LETTER F WITH HOOK
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xaa' # 0x00a6 -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x00a7 -> MASCULINE ORDINAL INDICATOR
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xf0' # 0x00d0 -> LATIN SMALL LETTER ETH
u'\xd0' # 0x00d1 -> LATIN CAPITAL LETTER ETH
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\u0131' # 0x00d5 -> LATIN SMALL LETTER DOTLESS I
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\xfe' # 0x00e7 -> LATIN SMALL LETTER THORN
u'\xde' # 0x00e8 -> LATIN CAPITAL LETTER THORN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u2017' # 0x00f2 -> DOUBLE LOW LINE
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00a6, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00a7, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d0: 0x00d1, # LATIN CAPITAL LETTER ETH
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00de: 0x00e8, # LATIN CAPITAL LETTER THORN
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x008d, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f0: 0x00d0, # LATIN SMALL LETTER ETH
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x00fe: 0x00e7, # LATIN SMALL LETTER THORN
0x00ff: 0x0098, # LATIN SMALL LETTER Y WITH DIAERESIS
0x0131: 0x00d5, # LATIN SMALL LETTER DOTLESS I
0x0192: 0x009f, # LATIN SMALL LETTER F WITH HOOK
0x2017: 0x00f2, # DOUBLE LOW LINE
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
|
apache-2.0
|
PythonProgramming/Beginning-Game-Development-with-Python-and-Pygame
|
Chapter 12/heavyfogtank.py.py
|
3
|
2293
|
from math import radians
from OpenGL.GL import *
from OpenGL.GLU import *
import pygame
from pygame.locals import *
# Import the Model3D class
import model3d
SCREEN_SIZE = (800, 600)
def resize(width, height):
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60.0, float(width)/height, .1, 1000.)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def init():
glEnable(GL_FOG)
glFogfv(GL_FOG_COLOR, (1.0, 1.0, 1.0))
glFogi(GL_FOG_MODE, GL_LINEAR)
glFogf(GL_FOG_START, 1.5)
glFogf(GL_FOG_END, 3.5)
# Enable the GL features we will be using
glEnable(GL_DEPTH_TEST)
glEnable(GL_LIGHTING)
glEnable(GL_COLOR_MATERIAL)
glEnable(GL_TEXTURE_2D)
glEnable(GL_CULL_FACE)
glShadeModel(GL_SMOOTH)
glClearColor(1.0, 1.0, 1.0, 0.0) # white
# Set the material
glMaterial(GL_FRONT, GL_AMBIENT, (0.0, 0.0, 0.0, 1.0))
glMaterial(GL_FRONT, GL_DIFFUSE, (0.2, 0.2, 0.2, 1.0))
glMaterial(GL_FRONT, GL_SPECULAR, (1.0, 1.0, 1.0, 1.0))
glMaterial(GL_FRONT, GL_SHININESS, 10.0)
# Set light parameters
glLight(GL_LIGHT0, GL_AMBIENT, (0.0, 0.0, 0.0, 1.0))
glLight(GL_LIGHT0, GL_DIFFUSE, (0.4, 0.4, 0.4, 1.0))
glLight(GL_LIGHT0, GL_SPECULAR, (1.0, 1.0, 1.0, 1.0))
# Enable light 1 and set position
glEnable(GL_LIGHT0)
glLight(GL_LIGHT0, GL_POSITION, (0, .5, 1, 0))
def run():
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE, HWSURFACE|OPENGL|DOUBLEBUF)
resize(*SCREEN_SIZE)
init()
clock = pygame.time.Clock()
# Read the model
tank_model = model3d.Model3D()
tank_model.read_obj('mytank.obj')
rotation = 0.0
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
quit()
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
time_passed = clock.tick()
time_passed_seconds = time_passed / 1000.0
glLoadIdentity()
glRotatef(15, 1, 0, 0)
glTranslatef(0.0, -1.5, -3.5)
rotation += time_passed_seconds * 45.0
glRotatef(rotation, 0, 1, 0)
tank_model.draw_quick()
pygame.display.flip()
if __name__ == "__main__":
run()
|
mit
|
freedomtan/tensorflow
|
tensorflow/python/platform/benchmark_test.py
|
17
|
2860
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for the tf.test.benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class BenchmarkTest(test.TestCase, benchmark.TensorFlowBenchmark):
def testReportBenchmark(self):
output_dir = self.get_temp_dir() + os.path.sep
os.environ['TEST_REPORT_FILE_PREFIX'] = output_dir
proto_file_path = os.path.join(output_dir,
'BenchmarkTest.testReportBenchmark')
if os.path.exists(proto_file_path):
os.remove(proto_file_path)
self.report_benchmark(
iters=2000,
wall_time=1000,
name='testReportBenchmark',
metrics=[{'name': 'metric_name_1', 'value': 0, 'min_value': 1},
{'name': 'metric_name_2', 'value': 90, 'min_value': 0,
'max_value': 95}])
with open(proto_file_path, 'rb') as f:
benchmark_entries = test_log_pb2.BenchmarkEntries()
benchmark_entries.ParseFromString(f.read())
actual_result = json_format.MessageToDict(
benchmark_entries, preserving_proto_field_name=True,
including_default_value_fields=True)['entry'][0]
os.remove(proto_file_path)
expected_result = {
'name': 'BenchmarkTest.testReportBenchmark',
# google.protobuf.json_format.MessageToDict() will convert
# int64 field to string.
'iters': '2000',
'wall_time': 1000,
'cpu_time': 0,
'throughput': 0,
'extras': {},
'metrics': [
{
'name': 'metric_name_1',
'value': 0,
'min_value': 1
},
{
'name': 'metric_name_2',
'value': 90,
'min_value': 0,
'max_value': 95
}
]
}
self.assertEqual(2000, benchmark_entries.entry[0].iters)
self.assertDictEqual(expected_result, actual_result)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
hfp/tensorflow-xsmm
|
tensorflow/contrib/boosted_trees/estimator_batch/custom_loss_head.py
|
87
|
3030
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of `head.Head` with custom loss and link function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
class CustomLossHead(head_lib._RegressionHead): # pylint: disable=protected-access
"""A Head object with custom loss function and link function."""
def __init__(self,
loss_fn,
link_fn,
logit_dimension,
head_name=None,
weight_column_name=None,
metrics_fn=None):
"""`Head` for specifying arbitrary loss function.
Args:
loss_fn: Loss function.
link_fn: Function that converts logits to prediction.
logit_dimension: Number of dimensions for the logits.
head_name: name of the head. Predictions, summary, metrics keys are
suffixed by `"/" + head_name` and the default variable scope is
`head_name`.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
metrics_fn: a function that takes predictions dict, labels and weights and
returns a dictionary of metrics to be calculated.
"""
def loss_wrapper(labels, logits, weight_tensor):
if weight_tensor is None:
weight_tensor = array_ops.ones(
shape=[array_ops.shape(labels)[0], 1], dtype=dtypes.float32)
weighted_loss, _ = loss_fn(labels, weight_tensor, logits)
average_loss = math_ops.reduce_mean(weighted_loss)
return average_loss, average_loss / math_ops.reduce_mean(weight_tensor)
super(CustomLossHead, self).__init__(
loss_fn=loss_wrapper,
link_fn=link_fn,
head_name=head_name,
weight_column_name=weight_column_name,
enable_centered_bias=False,
label_dimension=logit_dimension)
self._metrics_fn = metrics_fn
def _metrics(self, eval_loss, predictions, labels, weights):
if self._metrics_fn is not None:
return self._metrics_fn(predictions, labels, weights)
|
apache-2.0
|
SaschaMester/delicium
|
tools/telemetry/telemetry/internal/util/path.py
|
13
|
1600
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry.core import util
# TODO(dtu): Move these functions from core.util to here.
GetBaseDir = util.GetBaseDir
GetTelemetryDir = util.GetTelemetryDir
GetUnittestDataDir = util.GetUnittestDataDir
GetChromiumSrcDir = util.GetChromiumSrcDir
AddDirToPythonPath = util.AddDirToPythonPath
GetBuildDirectories = util.GetBuildDirectories
def IsExecutable(path):
return os.path.isfile(path) and os.access(path, os.X_OK)
def FindInstalledWindowsApplication(application_path):
"""Search common Windows installation directories for an application.
Args:
application_path: Path to application relative from installation location.
Returns:
A string representing the full path, or None if not found.
"""
search_paths = [os.getenv('PROGRAMFILES(X86)'),
os.getenv('PROGRAMFILES'),
os.getenv('LOCALAPPDATA')]
search_paths += os.getenv('PATH', '').split(os.pathsep)
for search_path in search_paths:
if not search_path:
continue
path = os.path.join(search_path, application_path)
if IsExecutable(path):
return path
return None
def IsSubpath(subpath, superpath):
"""Returns True iff subpath is or is in superpath."""
subpath = os.path.realpath(subpath)
superpath = os.path.realpath(superpath)
while len(subpath) >= len(superpath):
if subpath == superpath:
return True
subpath = os.path.split(subpath)[0]
return False
|
bsd-3-clause
|
alvarofierroclavero/scikit-learn
|
sklearn/ensemble/forest.py
|
176
|
62555
|
"""Forest of trees-based ensemble methods
Those methods include random forests and extremely randomized trees.
The module structure is the following:
- The ``BaseForest`` base class implements a common ``fit`` method for all
the estimators in the module. The ``fit`` method of the base ``Forest``
class calls the ``fit`` method of each sub-estimator on random samples
(with replacement, a.k.a. bootstrap) of the training set.
The init of the sub-estimator is further delegated to the
``BaseEnsemble`` constructor.
- The ``ForestClassifier`` and ``ForestRegressor`` base classes further
implement the prediction logic by computing an average of the predicted
outcomes of the sub-estimators.
- The ``RandomForestClassifier`` and ``RandomForestRegressor`` derived
classes provide the user with concrete implementations of
the forest ensemble method using classical, deterministic
``DecisionTreeClassifier`` and ``DecisionTreeRegressor`` as
sub-estimator implementations.
- The ``ExtraTreesClassifier`` and ``ExtraTreesRegressor`` derived
classes provide the user with concrete implementations of the
forest ensemble method using the extremely randomized trees
``ExtraTreeClassifier`` and ``ExtraTreeRegressor`` as
sub-estimator implementations.
Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# License: BSD 3 clause
from __future__ import division
import warnings
from warnings import warn
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..metrics import r2_score
from ..preprocessing import OneHotEncoder
from ..tree import (DecisionTreeClassifier, DecisionTreeRegressor,
ExtraTreeClassifier, ExtraTreeRegressor)
from ..tree._tree import DTYPE, DOUBLE
from ..utils import check_random_state, check_array, compute_sample_weight
from ..utils.validation import DataConversionWarning, NotFittedError
from .base import BaseEnsemble, _partition_estimators
from ..utils.fixes import bincount
__all__ = ["RandomForestClassifier",
"RandomForestRegressor",
"ExtraTreesClassifier",
"ExtraTreesRegressor",
"RandomTreesEmbedding"]
MAX_INT = np.iinfo(np.int32).max
def _generate_sample_indices(random_state, n_samples):
"""Private function used to _parallel_build_trees function."""
random_instance = check_random_state(random_state)
sample_indices = random_instance.randint(0, n_samples, n_samples)
return sample_indices
def _generate_unsampled_indices(random_state, n_samples):
"""Private function used to forest._set_oob_score fuction."""
sample_indices = _generate_sample_indices(random_state, n_samples)
sample_counts = bincount(sample_indices, minlength=n_samples)
unsampled_mask = sample_counts == 0
indices_range = np.arange(n_samples)
unsampled_indices = indices_range[unsampled_mask]
return unsampled_indices
def _parallel_build_trees(tree, forest, X, y, sample_weight, tree_idx, n_trees,
verbose=0, class_weight=None):
"""Private function used to fit a single tree in parallel."""
if verbose > 1:
print("building tree %d of %d" % (tree_idx + 1, n_trees))
if forest.bootstrap:
n_samples = X.shape[0]
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,), dtype=np.float64)
else:
curr_sample_weight = sample_weight.copy()
indices = _generate_sample_indices(tree.random_state, n_samples)
sample_counts = bincount(indices, minlength=n_samples)
curr_sample_weight *= sample_counts
if class_weight == 'subsample':
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
curr_sample_weight *= compute_sample_weight('auto', y, indices)
elif class_weight == 'balanced_subsample':
curr_sample_weight *= compute_sample_weight('balanced', y, indices)
tree.fit(X, y, sample_weight=curr_sample_weight, check_input=False)
else:
tree.fit(X, y, sample_weight=sample_weight, check_input=False)
return tree
def _parallel_helper(obj, methodname, *args, **kwargs):
"""Private helper to workaround Python 2 pickle limitations"""
return getattr(obj, methodname)(*args, **kwargs)
class BaseForest(six.with_metaclass(ABCMeta, BaseEnsemble,
_LearntSelectorMixin)):
"""Base class for forests of trees.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(BaseForest, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params)
self.bootstrap = bootstrap
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.warm_start = warm_start
self.class_weight = class_weight
def apply(self, X):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
X_leaves : array_like, shape = [n_samples, n_estimators]
For each datapoint x in X and for each tree in the forest,
return the index of the leaf x ends up in.
"""
X = self._validate_X_predict(X)
results = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(tree, 'apply', X, check_input=False)
for tree in self.estimators_)
return np.array(results).T
def fit(self, X, y, sample_weight=None):
"""Build a forest of trees from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
Returns
-------
self : object
Returns self.
"""
# Validate or convert input data
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
# Remap output
n_samples, self.n_features_ = X.shape
y = np.atleast_1d(y)
if y.ndim == 2 and y.shape[1] == 1:
warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples,), for example using ravel().",
DataConversionWarning, stacklevel=2)
if y.ndim == 1:
# reshape is necessary to preserve the data contiguity against vs
# [:, np.newaxis] that does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
y, expanded_class_weight = self._validate_y_class_weight(y)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Check parameters
self._validate_estimator()
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
random_state = check_random_state(self.random_state)
if not self.warm_start:
# Free allocated memory, if any
self.estimators_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
else:
if self.warm_start and len(self.estimators_) > 0:
# We draw from the random state to get the random state we
# would have got if we hadn't used a warm_start.
random_state.randint(MAX_INT, size=len(self.estimators_))
trees = []
for i in range(n_more_estimators):
tree = self._make_estimator(append=False)
tree.set_params(random_state=random_state.randint(MAX_INT))
trees.append(tree)
# Parallel loop: we use the threading backend as the Cython code
# for fitting the trees is internally releasing the Python GIL
# making threading always more efficient than multiprocessing in
# that case.
trees = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_build_trees)(
t, self, X, y, sample_weight, i, len(trees),
verbose=self.verbose, class_weight=self.class_weight)
for i, t in enumerate(trees))
# Collect newly grown trees
self.estimators_.extend(trees)
if self.oob_score:
self._set_oob_score(X, y)
# Decapsulate classes_ attributes
if hasattr(self, "classes_") and self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y_class_weight(self, y):
# Default implementation
return y, None
def _validate_X_predict(self, X):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
return self.estimators_[0]._validate_X_predict(X, check_input=True)
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, "
"call `fit` before `feature_importances_`.")
all_importances = Parallel(n_jobs=self.n_jobs,
backend="threading")(
delayed(getattr)(tree, 'feature_importances_')
for tree in self.estimators_)
return sum(all_importances) / len(self.estimators_)
class ForestClassifier(six.with_metaclass(ABCMeta, BaseForest,
ClassifierMixin)):
"""Base class for forest of trees-based classifiers.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ForestClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
def _set_oob_score(self, X, y):
"""Compute out-of-bag score"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_classes_ = self.n_classes_
n_samples = y.shape[0]
oob_decision_function = []
oob_score = 0.0
predictions = []
for k in range(self.n_outputs_):
predictions.append(np.zeros((n_samples, n_classes_[k])))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict_proba(X[unsampled_indices, :],
check_input=False)
if self.n_outputs_ == 1:
p_estimator = [p_estimator]
for k in range(self.n_outputs_):
predictions[k][unsampled_indices, :] += p_estimator[k]
for k in range(self.n_outputs_):
if (predictions[k].sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
decision = (predictions[k] /
predictions[k].sum(axis=1)[:, np.newaxis])
oob_decision_function.append(decision)
oob_score += np.mean(y[:, k] ==
np.argmax(predictions[k], axis=1), axis=0)
if self.n_outputs_ == 1:
self.oob_decision_function_ = oob_decision_function[0]
else:
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score / self.n_outputs_
def _validate_y_class_weight(self, y):
y = np.copy(y)
expanded_class_weight = None
if self.class_weight is not None:
y_original = np.copy(y)
self.classes_ = []
self.n_classes_ = []
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
valid_presets = ('auto', 'balanced', 'balanced_subsample', 'subsample', 'auto')
if isinstance(self.class_weight, six.string_types):
if self.class_weight not in valid_presets:
raise ValueError('Valid presets for class_weight include '
'"balanced" and "balanced_subsample". Given "%s".'
% self.class_weight)
if self.class_weight == "subsample":
warn("class_weight='subsample' is deprecated and will be removed in 0.18."
" It was replaced by class_weight='balanced_subsample' "
"using the balanced strategy.", DeprecationWarning)
if self.warm_start:
warn('class_weight presets "balanced" or "balanced_subsample" are '
'not recommended for warm_start if the fitted data '
'differs from the full dataset. In order to use '
'"balanced" weights, use compute_class_weight("balanced", '
'classes, y). In place of y you can use a large '
'enough sample of the full training set target to '
'properly estimate the class frequency '
'distributions. Pass the resulting weights as the '
'class_weight parameter.')
if (self.class_weight not in ['subsample', 'balanced_subsample'] or
not self.bootstrap):
if self.class_weight == 'subsample':
class_weight = 'auto'
elif self.class_weight == "balanced_subsample":
class_weight = "balanced"
else:
class_weight = self.class_weight
with warnings.catch_warnings():
if class_weight == "auto":
warnings.simplefilter('ignore', DeprecationWarning)
expanded_class_weight = compute_sample_weight(class_weight,
y_original)
return y, expanded_class_weight
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is a vote by the trees in
the forest, weighted by their probability estimates. That is,
the predicted class is the one with highest mean probability
estimate across the trees.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
n_samples = proba[0].shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(np.argmax(proba[k],
axis=1),
axis=0)
return predictions
def predict_proba(self, X):
"""Predict class probabilities for X.
The predicted class probabilities of an input sample is computed as
the mean predicted class probabilities of the trees in the forest. The
class probability of a single tree is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict_proba', X,
check_input=False)
for e in self.estimators_)
# Reduce
proba = all_proba[0]
if self.n_outputs_ == 1:
for j in range(1, len(all_proba)):
proba += all_proba[j]
proba /= len(self.estimators_)
else:
for j in range(1, len(all_proba)):
for k in range(self.n_outputs_):
proba[k] += all_proba[j][k]
for k in range(self.n_outputs_):
proba[k] /= self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
The predicted class log-probabilities of an input sample is computed as
the log of the mean predicted class probabilities of the trees in the
forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
class ForestRegressor(six.with_metaclass(ABCMeta, BaseForest, RegressorMixin)):
"""Base class for forest of trees-based regressors.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator,
n_estimators=10,
estimator_params=tuple(),
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ForestRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
estimator_params=estimator_params,
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the trees in the forest.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted values.
"""
# Check data
X = self._validate_X_predict(X)
# Assign chunk of trees to jobs
n_jobs, _, _ = _partition_estimators(self.n_estimators, self.n_jobs)
# Parallel loop
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
backend="threading")(
delayed(_parallel_helper)(e, 'predict', X, check_input=False)
for e in self.estimators_)
# Reduce
y_hat = sum(all_y_hat) / len(self.estimators_)
return y_hat
def _set_oob_score(self, X, y):
"""Compute out-of-bag scores"""
X = check_array(X, dtype=DTYPE, accept_sparse='csr')
n_samples = y.shape[0]
predictions = np.zeros((n_samples, self.n_outputs_))
n_predictions = np.zeros((n_samples, self.n_outputs_))
for estimator in self.estimators_:
unsampled_indices = _generate_unsampled_indices(
estimator.random_state, n_samples)
p_estimator = estimator.predict(
X[unsampled_indices, :], check_input=False)
if self.n_outputs_ == 1:
p_estimator = p_estimator[:, np.newaxis]
predictions[unsampled_indices, :] += p_estimator
n_predictions[unsampled_indices, :] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few trees were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
if self.n_outputs_ == 1:
self.oob_prediction_ = \
self.oob_prediction_.reshape((n_samples, ))
self.oob_score_ = 0.0
for k in range(self.n_outputs_):
self.oob_score_ += r2_score(y[:, k],
predictions[:, k])
self.oob_score_ /= self.n_outputs_
class RandomForestClassifier(ForestClassifier):
"""A random forest classifier.
A random forest is a meta estimator that fits a number of decision tree
classifiers on various sub-samples of the dataset and use averaging to
improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)` (same as "auto").
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeClassifier, ExtraTreesClassifier
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(RandomForestClassifier, self).__init__(
base_estimator=DecisionTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomForestRegressor(ForestRegressor):
"""A random forest regressor.
A random forest is a meta estimator that fits a number of classifying
decision trees on various sub-samples of the dataset and use averaging
to improve the predictive accuracy and control over-fitting.
The sub-sample size is always the same as the original
input sample size but the samples are drawn with replacement if
`bootstrap=True` (default).
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=True)
Whether bootstrap samples are used when building trees.
oob_score : bool
whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] L. Breiman, "Random Forests", Machine Learning, 45(1), 5-32, 2001.
See also
--------
DecisionTreeRegressor, ExtraTreesRegressor
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=True,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomForestRegressor, self).__init__(
base_estimator=DecisionTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesClassifier(ForestClassifier):
"""An extra-trees classifier.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
class_weight : dict, list of dicts, "balanced", "balanced_subsample" or None, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
The "balanced_subsample" mode is the same as "balanced" except that weights are
computed based on the bootstrap sample for every tree grown.
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem), or a list of arrays of
class labels (multi-output problem).
n_classes_ : int or list
The number of classes (single output problem), or a list containing the
number of classes for each output (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeClassifier : Base classifier for this ensemble.
RandomForestClassifier : Ensemble Classifier based on trees with optimal
splits.
"""
def __init__(self,
n_estimators=10,
criterion="gini",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False,
class_weight=None):
super(ExtraTreesClassifier, self).__init__(
base_estimator=ExtraTreeClassifier(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes", "random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start,
class_weight=class_weight)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class ExtraTreesRegressor(ForestRegressor):
"""An extra-trees regressor.
This class implements a meta estimator that fits a number of
randomized decision trees (a.k.a. extra-trees) on various sub-samples
of the dataset and use averaging to improve the predictive accuracy
and control over-fitting.
Read more in the :ref:`User Guide <forest>`.
Parameters
----------
n_estimators : integer, optional (default=10)
The number of trees in the forest.
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error.
Note: this parameter is tree-specific.
max_features : int, float, string or None, optional (default="auto")
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
Note: this parameter is tree-specific.
max_depth : integer or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
Note: this parameter is tree-specific.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
Note: this parameter is tree-specific.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
Note: this parameter is tree-specific.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
Note: this parameter is tree-specific.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
Note: this parameter is tree-specific.
bootstrap : boolean, optional (default=False)
Whether bootstrap samples are used when building trees.
Note: this parameter is tree-specific.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeRegressor
The collection of fitted sub-estimators.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
n_features_ : int
The number of features.
n_outputs_ : int
The number of outputs.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training set.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
See also
--------
sklearn.tree.ExtraTreeRegressor: Base estimator for this ensemble.
RandomForestRegressor: Ensemble regressor using trees with optimal splits.
"""
def __init__(self,
n_estimators=10,
criterion="mse",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
max_leaf_nodes=None,
bootstrap=False,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(ExtraTreesRegressor, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=bootstrap,
oob_score=oob_score,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = criterion
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.max_leaf_nodes = max_leaf_nodes
class RandomTreesEmbedding(BaseForest):
"""An ensemble of totally random trees.
An unsupervised transformation of a dataset to a high-dimensional
sparse representation. A datapoint is coded according to which leaf of
each tree it is sorted into. Using a one-hot encoding of the leaves,
this leads to a binary coding with as many ones as there are trees in
the forest.
The dimensionality of the resulting representation is
``n_out <= n_estimators * max_leaf_nodes``. If ``max_leaf_nodes == None``,
the number of leaf nodes is at most ``n_estimators * 2 ** max_depth``.
Read more in the :ref:`User Guide <random_trees_embedding>`.
Parameters
----------
n_estimators : int
Number of trees in the forest.
max_depth : int
The maximum depth of each tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : integer, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : integer, optional (default=1)
The minimum number of samples in newly created leaves. A split is
discarded if after the split, one of the leaves would contain less then
``min_samples_leaf`` samples.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
sparse_output : bool, optional (default=True)
Whether or not to return a sparse CSR matrix, as default behavior,
or to return a dense array compatible with dense pipeline operators.
n_jobs : integer, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit a whole
new forest.
Attributes
----------
estimators_ : list of DecisionTreeClassifier
The collection of fitted sub-estimators.
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
.. [2] Moosmann, F. and Triggs, B. and Jurie, F. "Fast discriminative
visual codebooks using randomized clustering forests"
NIPS 2007
"""
def __init__(self,
n_estimators=10,
max_depth=5,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_leaf_nodes=None,
sparse_output=True,
n_jobs=1,
random_state=None,
verbose=0,
warm_start=False):
super(RandomTreesEmbedding, self).__init__(
base_estimator=ExtraTreeRegressor(),
n_estimators=n_estimators,
estimator_params=("criterion", "max_depth", "min_samples_split",
"min_samples_leaf", "min_weight_fraction_leaf",
"max_features", "max_leaf_nodes",
"random_state"),
bootstrap=False,
oob_score=False,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose,
warm_start=warm_start)
self.criterion = 'mse'
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = 1
self.max_leaf_nodes = max_leaf_nodes
self.sparse_output = sparse_output
def _set_oob_score(self, X, y):
raise NotImplementedError("OOB score not supported by tree embedding")
def fit(self, X, y=None, sample_weight=None):
"""Fit estimator.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
The input samples. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csc_matrix`` for maximum efficiency.
Returns
-------
self : object
Returns self.
"""
self.fit_transform(X, y, sample_weight=sample_weight)
return self
def fit_transform(self, X, y=None, sample_weight=None):
"""Fit estimator and transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data used to build forests. Use ``dtype=np.float32`` for
maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
# ensure_2d=False because there are actually unit test checking we fail
# for 1d.
X = check_array(X, accept_sparse=['csc'], ensure_2d=False)
if issparse(X):
# Pre-sort indices to avoid that each individual tree of the
# ensemble sorts the indices.
X.sort_indices()
rnd = check_random_state(self.random_state)
y = rnd.uniform(size=X.shape[0])
super(RandomTreesEmbedding, self).fit(X, y,
sample_weight=sample_weight)
self.one_hot_encoder_ = OneHotEncoder(sparse=self.sparse_output)
return self.one_hot_encoder_.fit_transform(self.apply(X))
def transform(self, X):
"""Transform dataset.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Input data to be transformed. Use ``dtype=np.float32`` for maximum
efficiency. Sparse matrices are also supported, use sparse
``csr_matrix`` for maximum efficiency.
Returns
-------
X_transformed : sparse matrix, shape=(n_samples, n_out)
Transformed dataset.
"""
return self.one_hot_encoder_.transform(self.apply(X))
|
bsd-3-clause
|
alex/boto
|
boto/rds/parametergroup.py
|
170
|
7037
|
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class ParameterGroup(dict):
def __init__(self, connection=None):
dict.__init__(self)
self.connection = connection
self.name = None
self.description = None
self.engine = None
self._current_param = None
def __repr__(self):
return 'ParameterGroup:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'Parameter':
if self._current_param:
self[self._current_param.name] = self._current_param
self._current_param = Parameter(self)
return self._current_param
def endElement(self, name, value, connection):
if name == 'DBParameterGroupName':
self.name = value
elif name == 'Description':
self.description = value
elif name == 'Engine':
self.engine = value
else:
setattr(self, name, value)
def modifiable(self):
mod = []
for key in self:
p = self[key]
if p.is_modifiable:
mod.append(p)
return mod
def get_params(self):
pg = self.connection.get_all_dbparameters(self.name)
self.update(pg)
def add_param(self, name, value, apply_method):
param = Parameter()
param.name = name
param.value = value
param.apply_method = apply_method
self.params.append(param)
class Parameter(object):
"""
Represents a RDS Parameter
"""
ValidTypes = {'integer' : int,
'string' : str,
'boolean' : bool}
ValidSources = ['user', 'system', 'engine-default']
ValidApplyTypes = ['static', 'dynamic']
ValidApplyMethods = ['immediate', 'pending-reboot']
def __init__(self, group=None, name=None):
self.group = group
self.name = name
self._value = None
self.type = 'string'
self.source = None
self.is_modifiable = True
self.description = None
self.apply_method = None
self.allowed_values = None
def __repr__(self):
return 'Parameter:%s' % self.name
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'ParameterName':
self.name = value
elif name == 'ParameterValue':
self._value = value
elif name == 'DataType':
if value in self.ValidTypes:
self.type = value
elif name == 'Source':
if value in self.ValidSources:
self.source = value
elif name == 'IsModifiable':
if value.lower() == 'true':
self.is_modifiable = True
else:
self.is_modifiable = False
elif name == 'Description':
self.description = value
elif name == 'ApplyType':
if value in self.ValidApplyTypes:
self.apply_type = value
elif name == 'AllowedValues':
self.allowed_values = value
else:
setattr(self, name, value)
def merge(self, d, i):
prefix = 'Parameters.member.%d.' % i
if self.name:
d[prefix+'ParameterName'] = self.name
if self._value is not None:
d[prefix+'ParameterValue'] = self._value
if self.apply_type:
d[prefix+'ApplyMethod'] = self.apply_method
def _set_string_value(self, value):
if not isinstance(value, basestring):
raise ValueError('value must be of type str')
if self.allowed_values:
choices = self.allowed_values.split(',')
if value not in choices:
raise ValueError('value must be in %s' % self.allowed_values)
self._value = value
def _set_integer_value(self, value):
if isinstance(value, basestring):
value = int(value)
if isinstance(value, int) or isinstance(value, long):
if self.allowed_values:
min, max = self.allowed_values.split('-')
if value < int(min) or value > int(max):
raise ValueError('range is %s' % self.allowed_values)
self._value = value
else:
raise ValueError('value must be integer')
def _set_boolean_value(self, value):
if isinstance(value, bool):
self._value = value
elif isinstance(value, basestring):
if value.lower() == 'true':
self._value = True
else:
self._value = False
else:
raise ValueError('value must be boolean')
def set_value(self, value):
if self.type == 'string':
self._set_string_value(value)
elif self.type == 'integer':
self._set_integer_value(value)
elif self.type == 'boolean':
self._set_boolean_value(value)
else:
raise TypeError('unknown type (%s)' % self.type)
def get_value(self):
if self._value is None:
return self._value
if self.type == 'string':
return self._value
elif self.type == 'integer':
if not isinstance(self._value, int) and not isinstance(self._value, long):
self._set_integer_value(self._value)
return self._value
elif self.type == 'boolean':
if not isinstance(self._value, bool):
self._set_boolean_value(self._value)
return self._value
else:
raise TypeError('unknown type (%s)' % self.type)
value = property(get_value, set_value, 'The value of the parameter')
def apply(self, immediate=False):
if immediate:
self.apply_method = 'immediate'
else:
self.apply_method = 'pending-reboot'
self.group.connection.modify_parameter_group(self.group.name, [self])
|
mit
|
jlegendary/nupic
|
examples/prediction/experiments/confidenceTest/1/description.py
|
17
|
2027
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Tests the following set of sequences:
a-b-c: (7X)
a-d-e: (2X)
a-f-g-a-h: (1X)
We want to insure that when we see 'a', that we predict 'b' with highest
confidence, then 'd', then 'f' and 'h' with equally low confidence.
We expect the following input prediction scores:
inputPredScore_at1 : 0.7
inputPredScore_at2 : 1.0
inputPredScore_at3 : 1.0
inputPredScore_at4 : 1.0
"""
from nupic.frameworks.prediction.helpers import importBaseDescription
config = dict(
sensorVerbosity=0,
spVerbosity=0,
tpVerbosity=0,
ppVerbosity=3,
filenameTrain = 'confidence/confidence1.csv',
filenameTest = 'confidence/confidence1.csv',
iterationCountTrain=None,
iterationCountTest=None,
trainTPRepeats = 3,
trainTP=True,
)
mod = importBaseDescription('../base/description.py', config)
locals().update(mod.__dict__)
|
gpl-3.0
|
SCIP-Interfaces/PySCIPOpt
|
examples/unfinished/portfolio_soco.py
|
2
|
2434
|
"""
portfolio_soco.py: modified markowitz model for portfolio optimization.
Approach: use second-order cone optimization.
Copyright (c) by Joao Pedro PEDROSO, Masahiro MURAMATSU and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict
import math
def phi_inv(p):
"""phi_inv: inverse of gaussian (normal) CDF
Source:
Handbook of Mathematical Functions
Dover Books on Mathematics
Milton Abramowitz and Irene A. Stegun (Editors)
Formula 26.2.23.
"""
if p < 0.5:
t = math.sqrt(-2.0*math.log(p))
return ((0.010328*t + 0.802853)*t + 2.515517)/(((0.001308*t + 0.189269)*t + 1.432788)*t + 1.0) - t
else:
t = math.sqrt(-2.0*math.log(1.0-p))
return t - ((0.010328*t + 0.802853)*t + 2.515517)/(((0.001308*t + 0.189269)*t + 1.432788)*t + 1.0)
def p_portfolio(I,sigma,r,alpha,beta):
"""p_portfolio -- modified markowitz model for portfolio optimization.
Parameters:
- I: set of items
- sigma[i]: standard deviation of item i
- r[i]: revenue of item i
- alpha: acceptance threshold
- beta: desired confidence level
Returns a model, ready to be solved.
"""
model = Model("p_portfolio")
x = {}
for i in I:
x[i] = model.addVar(vtype="C", name="x(%s)"%i) # quantity of i to buy
rho = model.addVar(vtype="C", name="rho")
rhoaux = model.addVar(vtype="C", name="rhoaux")
model.addCons(rho == quicksum(r[i]*x[i] for i in I))
model.addCons(quicksum(x[i] for i in I) == 1)
model.addCons(rhoaux == (alpha - rho)*(1/phi_inv(beta))) #todo
model.addCons(quicksum(sigma[i]**2 * x[i] * x[i] for i in I) <= rhoaux * rhoaux)
model.setObjective(rho, "maximize")
model.data = x
return model
if __name__ == "__main__":
# portfolio
I,sigma,r = multidict(
{1:[0.07,1.01],
2:[0.09,1.05],
3:[0.1,1.08],
4:[0.2,1.10],
5:[0.3,1.20]}
)
alpha = 0.95
# beta = 0.1
for beta in [0.1, 0.05, 0.02, 0.01]:
print("\n\n\nbeta:",beta,"phi inv:",phi_inv(beta))
model = p_portfolio(I,sigma,r,alpha,beta)
model.optimize()
x = model.data
EPS = 1.e-6
print("Investment:")
print("%5s\t%8s" % ("i","x[i]"))
for i in I:
print("%5s\t%8g" % (i,model.getVal(x[i])))
print("Objective:",model.getObjVal())
|
mit
|
yosshy/nova
|
nova/tests/unit/cells/test_cells_utils.py
|
9
|
7506
|
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Cells Utility methods
"""
import inspect
import mock
import random
from nova.cells import utils as cells_utils
from nova import exception
from nova import objects
from nova import test
class CellsUtilsTestCase(test.NoDBTestCase):
"""Test case for Cells utility methods."""
def test_get_instances_to_sync(self):
fake_context = 'fake_context'
call_info = {'get_all': 0, 'shuffle': 0}
def random_shuffle(_list):
call_info['shuffle'] += 1
@staticmethod
def instance_get_all_by_filters(context, filters,
sort_key, sort_dir):
self.assertEqual(fake_context, context)
self.assertEqual('deleted', sort_key)
self.assertEqual('asc', sort_dir)
call_info['got_filters'] = filters
call_info['get_all'] += 1
return ['fake_instance1', 'fake_instance2', 'fake_instance3']
self.stubs.Set(objects.InstanceList, 'get_by_filters',
instance_get_all_by_filters)
self.stubs.Set(random, 'shuffle', random_shuffle)
instances = cells_utils.get_instances_to_sync(fake_context)
self.assertTrue(inspect.isgenerator(instances))
self.assertEqual(3, len([x for x in instances]))
self.assertEqual(1, call_info['get_all'])
self.assertEqual({}, call_info['got_filters'])
self.assertEqual(0, call_info['shuffle'])
instances = cells_utils.get_instances_to_sync(fake_context,
shuffle=True)
self.assertTrue(inspect.isgenerator(instances))
self.assertEqual(3, len([x for x in instances]))
self.assertEqual(2, call_info['get_all'])
self.assertEqual({}, call_info['got_filters'])
self.assertEqual(1, call_info['shuffle'])
instances = cells_utils.get_instances_to_sync(fake_context,
updated_since='fake-updated-since')
self.assertTrue(inspect.isgenerator(instances))
self.assertEqual(3, len([x for x in instances]))
self.assertEqual(3, call_info['get_all'])
self.assertEqual({'changes-since': 'fake-updated-since'},
call_info['got_filters'])
self.assertEqual(1, call_info['shuffle'])
instances = cells_utils.get_instances_to_sync(fake_context,
project_id='fake-project',
updated_since='fake-updated-since', shuffle=True)
self.assertTrue(inspect.isgenerator(instances))
self.assertEqual(3, len([x for x in instances]))
self.assertEqual(4, call_info['get_all'])
self.assertEqual({'changes-since': 'fake-updated-since',
'project_id': 'fake-project'}, call_info['got_filters'])
self.assertEqual(2, call_info['shuffle'])
def test_split_cell_and_item(self):
path = 'australia', 'queensland', 'gold_coast'
cell = cells_utils.PATH_CELL_SEP.join(path)
item = 'host_5'
together = cells_utils.cell_with_item(cell, item)
self.assertEqual(cells_utils._CELL_ITEM_SEP.join([cell, item]),
together)
# Test normal usage
result_cell, result_item = cells_utils.split_cell_and_item(together)
self.assertEqual(cell, result_cell)
self.assertEqual(item, result_item)
# Test with no cell
cell = None
together = cells_utils.cell_with_item(cell, item)
self.assertEqual(item, together)
result_cell, result_item = cells_utils.split_cell_and_item(together)
self.assertEqual(cell, result_cell)
self.assertEqual(item, result_item)
def test_add_cell_to_compute_node(self):
fake_compute = objects.ComputeNode(id=1, host='fake')
cell_path = 'fake_path'
proxy = cells_utils.add_cell_to_compute_node(fake_compute, cell_path)
self.assertIsInstance(proxy, cells_utils.ComputeNodeProxy)
self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
proxy.host)
@mock.patch.object(objects.Service, 'obj_load_attr')
def test_add_cell_to_service_no_compute_node(self, mock_get_by_id):
fake_service = objects.Service(id=1, host='fake')
mock_get_by_id.side_effect = exception.ServiceNotFound(service_id=1)
cell_path = 'fake_path'
proxy = cells_utils.add_cell_to_service(fake_service, cell_path)
self.assertIsInstance(proxy, cells_utils.ServiceProxy)
self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
proxy.host)
self.assertRaises(AttributeError,
getattr, proxy, 'compute_node')
def test_add_cell_to_service_with_compute_node(self):
fake_service = objects.Service(id=1, host='fake')
fake_service.compute_node = objects.ComputeNode(id=1, host='fake')
cell_path = 'fake_path'
proxy = cells_utils.add_cell_to_service(fake_service, cell_path)
self.assertIsInstance(proxy, cells_utils.ServiceProxy)
self.assertEqual(cells_utils.cell_with_item(cell_path, 1), proxy.id)
self.assertEqual(cells_utils.cell_with_item(cell_path, 'fake'),
proxy.host)
self.assertRaises(AttributeError,
getattr, proxy, 'compute_node')
def test_proxy_object_serializer_to_primitive(self):
obj = objects.ComputeNode(id=1, host='fake')
obj_proxy = cells_utils.ComputeNodeProxy(obj, 'fake_path')
serializer = cells_utils.ProxyObjectSerializer()
primitive = serializer.serialize_entity('ctx', obj_proxy)
self.assertIsInstance(primitive, dict)
class_name = primitive.pop('cell_proxy.class_name')
cell_path = primitive.pop('cell_proxy.cell_path')
self.assertEqual('ComputeNodeProxy', class_name)
self.assertEqual('fake_path', cell_path)
self.assertEqual(obj.obj_to_primitive(), primitive)
def test_proxy_object_serializer_from_primitive(self):
obj = objects.ComputeNode(id=1, host='fake')
serializer = cells_utils.ProxyObjectSerializer()
# Recreating the primitive by hand to isolate the test for only
# the deserializing method
primitive = obj.obj_to_primitive()
primitive['cell_proxy.class_name'] = 'ComputeNodeProxy'
primitive['cell_proxy.cell_path'] = 'fake_path'
result = serializer.deserialize_entity('ctx', primitive)
self.assertIsInstance(result, cells_utils.ComputeNodeProxy)
self.assertEqual(obj.obj_to_primitive(),
result._obj.obj_to_primitive())
self.assertEqual('fake_path', result._cell_path)
|
apache-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.