id
int64 0
843k
| repository_name
stringlengths 7
55
| file_path
stringlengths 9
332
| class_name
stringlengths 3
290
| human_written_code
stringlengths 12
4.36M
| class_skeleton
stringlengths 19
2.2M
| total_program_units
int64 1
9.57k
| total_doc_str
int64 0
4.2k
| AvgCountLine
float64 0
7.89k
| AvgCountLineBlank
float64 0
300
| AvgCountLineCode
float64 0
7.89k
| AvgCountLineComment
float64 0
7.89k
| AvgCyclomatic
float64 0
130
| CommentToCodeRatio
float64 0
176
| CountClassBase
float64 0
48
| CountClassCoupled
float64 0
589
| CountClassCoupledModified
float64 0
581
| CountClassDerived
float64 0
5.37k
| CountDeclInstanceMethod
float64 0
4.2k
| CountDeclInstanceVariable
float64 0
299
| CountDeclMethod
float64 0
4.2k
| CountDeclMethodAll
float64 0
4.2k
| CountLine
float64 1
115k
| CountLineBlank
float64 0
9.01k
| CountLineCode
float64 0
94.4k
| CountLineCodeDecl
float64 0
46.1k
| CountLineCodeExe
float64 0
91.3k
| CountLineComment
float64 0
27k
| CountStmt
float64 1
93.2k
| CountStmtDecl
float64 0
46.1k
| CountStmtExe
float64 0
90.2k
| MaxCyclomatic
float64 0
759
| MaxInheritanceTree
float64 0
16
| MaxNesting
float64 0
34
| SumCyclomatic
float64 0
6k
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3,600 |
AirtestProject/Poco
|
AirtestProject_Poco/poco/drivers/unity3d/test/tutorial/local_positioning1.py
|
poco.drivers.unity3d.test.tutorial.local_positioning1.LocalPositioning1Tutorial
|
class LocalPositioning1Tutorial(TutorialCase):
def runTest(self):
image = self.poco('fish').child(type='Image')
image.focus('center').long_click()
time.sleep(0.2)
image.focus([0.1, 0.1]).long_click()
time.sleep(0.2)
image.focus([0.9, 0.9]).long_click()
time.sleep(0.2)
image.focus([0.5, 0.9]).long_click()
time.sleep(0.2)
|
class LocalPositioning1Tutorial(TutorialCase):
def runTest(self):
pass
| 2 | 0 | 10 | 0 | 10 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 3 | 11 | 0 | 11 | 3 | 9 | 0 | 11 | 3 | 9 | 1 | 2 | 0 | 1 |
3,601 |
AirtestProject/Poco
|
AirtestProject_Poco/poco/drivers/unity3d/test/tutorial/local_positioning2.py
|
poco.drivers.unity3d.test.tutorial.local_positioning2.LocalPositioning1Tutorial
|
class LocalPositioning1Tutorial(TutorialCase):
def runTest(self):
balloonfish_image = self.poco(text='balloonfish').focus([0.5, -3])
balloonfish_image.long_click()
time.sleep(0.2)
|
class LocalPositioning1Tutorial(TutorialCase):
def runTest(self):
pass
| 2 | 0 | 4 | 0 | 4 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 3 | 5 | 0 | 5 | 3 | 3 | 0 | 5 | 3 | 3 | 1 | 2 | 0 | 1 |
3,602 |
AirtestProject/Poco
|
AirtestProject_Poco/poco/drivers/std/dumper.py
|
poco.drivers.std.dumper.StdDumper
|
class StdDumper(FrozenUIDumper):
def __init__(self, rpcclient):
super(StdDumper, self).__init__()
self.rpcclient = rpcclient
@sync_wrapper
def dumpHierarchy(self, onlyVisibleNode=True):
return self.rpcclient.call("Dump", onlyVisibleNode)
|
class StdDumper(FrozenUIDumper):
def __init__(self, rpcclient):
pass
@sync_wrapper
def dumpHierarchy(self, onlyVisibleNode=True):
pass
| 4 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 9 | 8 | 1 | 7 | 5 | 3 | 0 | 6 | 4 | 3 | 1 | 4 | 0 | 2 |
3,603 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/game_state.py
|
echovr_api.game_state.GameState
|
class GameState():
"""Represents the current state of a game sesion.
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#properties
:param client_name:
The username of the currently signed-in user.
:param sessionid:
A 128-bit string-encoded GUID.
:param match_type:
Represents the type of match being played.
:param map_name:
Represents the current "map" (environment) the user is playing in.
:param private_match:
Whether the current session is a private match.
:param tournament_match:
Whether the current session is being used for an official tournament.
:param game_clock_display:
A human-readable representation of the current game clock time.
:param game_clock:
The current game clock time, in seconds.
:param game_status:
The current game's status.
:param possession:
An arry of two integers representing which team currently posesses the
disk.
:param blue_points:
The current score of the blue team.
:param orange_points:
The current score of the orange team.
:param disc:
A dict representing the current state of the disk.
:param last_score:
A dict containing facts and statistics related to the last goal scored.
:param teams:
An array of dicts containing data used to instantiate the game's two
teams.
:raises InvalidGameStateError:
Raised when you attempt to initialize the game into a state that doesn't
make sense (such as having three teams).
"""
def __init__(self, client_name: str = None, sessionid: str = None,
match_type: str = "INVALID GAMETYPE",
map_name: str = "INVALID LEVEL",
private_match: bool = False,
tournament_match: bool = False,
game_clock_display: str = None,
game_clock: float = None, game_status: str = 'unknown',
possession: List[int] = [], blue_points: int = 0,
orange_points: int = 0, disc: dict = {},
last_score: dict = {}, teams: List[dict] = []):
#: The username of the currently signed-in user.
self.client_name = client_name
#: A 128-bit string-encoded GUID.
self.sessionid = sessionid
#: Represents the type of match being played.
self.match_type = match_type
#: Represents the current "map" (environment) the user is playing in.
self.map_name = map_name
#: Whether the current session is a private match.
self.private_match = private_match
#: Whether the current session is being used for an official tournament.
self.tournament_match = tournament_match
#: A human-readable representation of the current game clock time.
self.game_clock_display = game_clock_display
#: The current game clock time, in seconds.
self.game_clock = game_clock
#: The current game's status.
self.game_status = game_status
#: An arry of two integers representing which team currently posesses
#: the disk.
self.possession = possession
#: The current score of the blue team.
self.blue_points = blue_points
#: The current score of the orange team.
self.orange_points = orange_points
#: A :class:`~.Disk` object representing the current state of the disk.
self.disc = Disk(**disc)
#: A :class:`~.ContextualizedLastScore` object containing facts and
#: statistics related to the last goal scored.
self.last_score = ContextualizedLastScore(self, **last_score)
if len(teams) != 2:
raise InvalidGameStateError("Unexpected number of teams: %s" % len(teams))
#: An array of both :class:`~.Team`\ s currently in the game
self.teams = [Team(**data) for data in teams]
#: An array of all :class:`~.Player`\ s currently in the game
self.players = [player for team in self.teams for player in team.players]
# Note: The positions of the blue and orange teams in the array seem to
# be fixed. Judging color by team name is unreliable, since players can
# set a custom team name for themselves by pressing F11.
#: The :class:`~.Team` object representing the blue team
self.blue_team = self.teams[0]
self.blue_team.color = Team.Color.BLUE
#: The :class:`~.Team` object representing the orange team
self.orange_team = self.teams[1]
self.orange_team.color = Team.Color.ORANGE
# Just in case I'm wrong (or a team decides to be a smart aleck), log
# it...
if self.blue_team == 'ORANGE TEAM' or self.orange_team == 'BLUE TEAM':
logging.warn("Blue/Orange teams might be backwards (judging by their names).")
def find_player(self, username: str = None):
"""Find the :class:`~.Player` with the given properties
Returns the player whose attributes match the given properties, or
``None`` if no match is found.
:param username: The username of the Player
"""
if username != None:
return next((player for player in self.players if player.name == username), None)
else:
return None
def find_team(self, color: str = None):
"""Find the :class:`~.Team` with the given properties
Returns the team whose attributes match the given properties, or
``None`` if no match is found.
:param color: The :class:`~.Team.Color` of the Team
"""
if color != None:
if color is Team.Color.BLUE:
return self.blue_team
else:
return self.orange_team
else:
return None
|
class GameState():
'''Represents the current state of a game sesion.
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#properties
:param client_name:
The username of the currently signed-in user.
:param sessionid:
A 128-bit string-encoded GUID.
:param match_type:
Represents the type of match being played.
:param map_name:
Represents the current "map" (environment) the user is playing in.
:param private_match:
Whether the current session is a private match.
:param tournament_match:
Whether the current session is being used for an official tournament.
:param game_clock_display:
A human-readable representation of the current game clock time.
:param game_clock:
The current game clock time, in seconds.
:param game_status:
The current game's status.
:param possession:
An arry of two integers representing which team currently posesses the
disk.
:param blue_points:
The current score of the blue team.
:param orange_points:
The current score of the orange team.
:param disc:
A dict representing the current state of the disk.
:param last_score:
A dict containing facts and statistics related to the last goal scored.
:param teams:
An array of dicts containing data used to instantiate the game's two
teams.
:raises InvalidGameStateError:
Raised when you attempt to initialize the game into a state that doesn't
make sense (such as having three teams).
'''
def __init__(self, client_name: str = None, sessionid: str = None,
match_type: str = "INVALID GAMETYPE",
map_name: str = "INVALID LEVEL",
private_match: bool = False,
tournament_match: bool = False,
game_clock_display: str = None,
game_clock: float = None, game_status: str = 'unknown',
possession: List[int] = [], blue_points: int = 0,
orange_points: int = 0, disc: dict = {},
last_score: dict = {}, teams: List[dict] = []):
pass
def find_player(self, username: str = None):
'''Find the :class:`~.Player` with the given properties
Returns the player whose attributes match the given properties, or
``None`` if no match is found.
:param username: The username of the Player
'''
pass
def find_team(self, color: str = None):
'''Find the :class:`~.Team` with the given properties
Returns the team whose attributes match the given properties, or
``None`` if no match is found.
:param color: The :class:`~.Team.Color` of the Team
'''
pass
| 4 | 3 | 36 | 8 | 16 | 12 | 3 | 1.58 | 0 | 10 | 5 | 0 | 3 | 18 | 3 | 3 | 155 | 31 | 48 | 31 | 35 | 76 | 36 | 22 | 32 | 3 | 0 | 2 | 8 |
3,604 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/game_state.py
|
echovr_api.game_state.InvalidGameStateError
|
class InvalidGameStateError(Exception):
"""Thrown when the state data passed to GameState is invalid"""
pass
|
class InvalidGameStateError(Exception):
'''Thrown when the state data passed to GameState is invalid'''
| 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 3 | 0 | 2 | 1 | 1 | 1 | 2 | 1 | 1 | 0 | 3 | 0 | 0 |
3,605 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/disk.py
|
echovr_api.disk.Disk
|
class Disk():
"""Represents the state of the Disk.
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#disc
:param position: The position_ of the Disk within the arena
:param velocity: The current velocity_ of the Disk
:param bounce_count: The number of times the disk has bounced
.. _position:
.. _velocity: https://github.com/Ajedi32/echovr_api_docs#vectors
"""
def __init__(self, position: List[float] = [0.0, 0.0, 0.0],
velocity: List[float] = [0.0, 0.0, 0.0],
bounce_count: int = 0):
#: A :class:`~.Vector3D` representing the current position of the disk
self.position = Vector3D(*position)
#: A :class:`~.Vector3D` representing the current velocity of the disk
self.velocity = Vector3D(*velocity)
#: The number of times the disk has bounced
self.bounce_count = bounce_count
|
class Disk():
'''Represents the state of the Disk.
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#disc
:param position: The position_ of the Disk within the arena
:param velocity: The current velocity_ of the Disk
:param bounce_count: The number of times the disk has bounced
.. _position:
.. _velocity: https://github.com/Ajedi32/echovr_api_docs#vectors
'''
def __init__(self, position: List[float] = [0.0, 0.0, 0.0],
velocity: List[float] = [0.0, 0.0, 0.0],
bounce_count: int = 0):
pass
| 2 | 1 | 12 | 3 | 6 | 3 | 1 | 2 | 0 | 2 | 0 | 0 | 1 | 3 | 1 | 1 | 28 | 7 | 7 | 7 | 3 | 14 | 5 | 5 | 3 | 1 | 0 | 0 | 1 |
3,606 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/api.py
|
echovr_api.api.API
|
class API(object):
"""An interface to the Echo VR API
:param base_url: The base URL used to communicate with the Echo VR API
"""
def __init__(self, base_url="http://127.0.0.1/"):
self.base_url = base_url
@property
def _gamestate_url(self):
return self.base_url.rstrip('/') + "/session"
def fetch_state_data(self):
"""Fetch the raw JSON game state data from EchoVR's ``/session`` API
This method could be useful if you want to retrieve some API data not
directly exposed by this Python wrapper. Otherwise, you should probably
use :meth:`fetch_state` instead.
:returns:
An object (probably a :class:`dict`) representing the raw JSON
response returned by the EchoVR client.
:raises requests.exceptions.ConnectionError:
This exception will be thrown if the API is unavaible. This might
indicate that the user is not currently in a match, or that they
didn't launch Echo VR with the `-http` option.
:raises json.decoder.JSONDecodeError:
This exception will be thrown if the data returned by the API is not
valid JSON. Likely indicates a bug in Echo VR or in this library.
"""
response = requests.get(self._gamestate_url)
response_text = response.text.rstrip('\0')
return json.loads(response_text)
def fetch_state(self):
"""
:returns:
A :class:`~.GameState` object representing the state of the current
game session as presented by the API.
:raises requests.exceptions.ConnectionError:
This exception will be thrown if the API is unavaible. This might
indicate that the user is not currently in a match, or that they
didn't launch Echo VR with the `-http` option.
:raises json.decoder.JSONDecodeError:
This exception will be thrown if the data returned by the API is not
valid JSON. Likely indicates a bug in Echo VR or in this library.
"""
return GameState(**self.fetch_state_data())
|
class API(object):
'''An interface to the Echo VR API
:param base_url: The base URL used to communicate with the Echo VR API
'''
def __init__(self, base_url="http://127.0.0.1/"):
pass
@property
def _gamestate_url(self):
pass
def fetch_state_data(self):
'''Fetch the raw JSON game state data from EchoVR's ``/session`` API
This method could be useful if you want to retrieve some API data not
directly exposed by this Python wrapper. Otherwise, you should probably
use :meth:`fetch_state` instead.
:returns:
An object (probably a :class:`dict`) representing the raw JSON
response returned by the EchoVR client.
:raises requests.exceptions.ConnectionError:
This exception will be thrown if the API is unavaible. This might
indicate that the user is not currently in a match, or that they
didn't launch Echo VR with the `-http` option.
:raises json.decoder.JSONDecodeError:
This exception will be thrown if the data returned by the API is not
valid JSON. Likely indicates a bug in Echo VR or in this library.
'''
pass
def fetch_state_data(self):
'''
:returns:
A :class:`~.GameState` object representing the state of the current
game session as presented by the API.
:raises requests.exceptions.ConnectionError:
This exception will be thrown if the API is unavaible. This might
indicate that the user is not currently in a match, or that they
didn't launch Echo VR with the `-http` option.
:raises json.decoder.JSONDecodeError:
This exception will be thrown if the data returned by the API is not
valid JSON. Likely indicates a bug in Echo VR or in this library.
'''
pass
| 6 | 3 | 11 | 2 | 3 | 7 | 1 | 2.5 | 1 | 1 | 1 | 0 | 4 | 1 | 4 | 4 | 53 | 11 | 12 | 9 | 6 | 30 | 11 | 8 | 6 | 1 | 1 | 0 | 4 |
3,607 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/geometry/vector3d.py
|
echovr_api.geometry.vector3d.Vector3D
|
class Vector3D():
"""Represents a vector in 3D space
:param x: The x coordinate of the vector
:param y: The y coordinate of the vector
:param z: The z coordinate of the vector
"""
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
|
class Vector3D():
'''Represents a vector in 3D space
:param x: The x coordinate of the vector
:param y: The y coordinate of the vector
:param z: The z coordinate of the vector
'''
def __init__(self, x, y, z):
pass
| 2 | 1 | 4 | 0 | 4 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 3 | 1 | 1 | 11 | 1 | 5 | 5 | 3 | 5 | 5 | 5 | 3 | 1 | 0 | 0 | 1 |
3,608 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/team.py
|
echovr_api.team.Team
|
class Team():
"""Represents the state of a single team in the current game
:param team:
A human-readable team name. Usually either "ORANGE TEAM" or "BLUE TEAM",
but that's subject to change, and may be different during LAN
tournaments (though I've not yet confirmed this).
:param possession:
Indicates whether this team currently has posession of the disk.
:param players:
An array of dicts containing data used to instantiate the team's
players.
:param stats:
A dict containing data used to instantiate the team's current stats.
:param color:
An enumerable `Color` representing the color of the team.
"""
class Color(Enum):
"""Represents the color (blue or orange) of a team"""
BLUE = 0
ORANGE = 1
@classmethod
def by_name(cls, name):
"""Return the `Color` that matches a given color name"""
try:
return cls[name.upper()]
except KeyError:
return None
def __init__(self, team: str = "", possession: bool = False,
players: List[dict] = [], stats: dict = {},
color: Color = None):
self.team = team
self.possession = possession
self.players = [Player(**player_data) for player_data in players]
self.stats = Stats(**stats)
self.color = color
@property
def name(self):
"""Better-named alias for `team`."""
return self.team
@property
def score(self):
"""The current score of the team.
Note: There's currently a bug in the API which makes this inaccurate if
the team has scored self-goals, but it's the best we have for now. If
the API ever exposes more accurate data, this method will be updated to
take advantage of that.
"""
# Note: game_status.(blue|orange)_score are currently bugged to always
# return 0. Once that bug is fixed, this should be updated to use those
# values instead.
return self.stats.points
|
class Team():
'''Represents the state of a single team in the current game
:param team:
A human-readable team name. Usually either "ORANGE TEAM" or "BLUE TEAM",
but that's subject to change, and may be different during LAN
tournaments (though I've not yet confirmed this).
:param possession:
Indicates whether this team currently has posession of the disk.
:param players:
An array of dicts containing data used to instantiate the team's
players.
:param stats:
A dict containing data used to instantiate the team's current stats.
:param color:
An enumerable `Color` representing the color of the team.
'''
class Color(Enum):
'''Represents the color (blue or orange) of a team'''
@classmethod
def by_name(cls, name):
'''Return the `Color` that matches a given color name'''
pass
def __init__(self, team: str = "", possession: bool = False,
players: List[dict] = [], stats: dict = {},
color: Color = None):
pass
@property
def name(self):
'''Better-named alias for `team`.'''
pass
@property
def score(self):
'''The current score of the team.
Note: There's currently a bug in the API which makes this inaccurate if
the team has scored self-goals, but it's the best we have for now. If
the API ever exposes more accurate data, this method will be updated to
take advantage of that.
'''
pass
| 9 | 5 | 8 | 1 | 4 | 3 | 1 | 1.13 | 0 | 6 | 3 | 0 | 3 | 5 | 3 | 3 | 59 | 8 | 24 | 18 | 13 | 27 | 19 | 13 | 13 | 2 | 0 | 1 | 5 |
3,609 |
Ajedi32/echovr-api
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Ajedi32_echovr-api/echovr_api/team.py
|
echovr_api.team.Team.Color
|
class Color(Enum):
"""Represents the color (blue or orange) of a team"""
BLUE = 0
ORANGE = 1
@classmethod
def by_name(cls, name):
"""Return the `Color` that matches a given color name"""
try:
return cls[name.upper()]
except KeyError:
return None
|
class Color(Enum):
'''Represents the color (blue or orange) of a team'''
@classmethod
def by_name(cls, name):
'''Return the `Color` that matches a given color name'''
pass
| 3 | 2 | 6 | 0 | 5 | 1 | 2 | 0.22 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 50 | 12 | 1 | 9 | 5 | 6 | 2 | 8 | 4 | 6 | 2 | 4 | 1 | 2 |
3,610 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/last_score.py
|
echovr_api.last_score.ContextualizedLastScore
|
class ContextualizedLastScore(LastScore):
"""Statistics about a goal, in the context of a :class:`~.GameState`
The same as the :class:`LastScore` class, but with additional convenience methods
and properties enabled by the context provided by a :class:`~.GameState`
object.
:param game_state:
An object representing the current state of the game
:param superclass_attributes:
Passed to the init method of :class:`LastScore`
"""
def __init__(self, game_state: 'echovr_api.game_state.GameState',
**superclass_attributes):
super().__init__(**superclass_attributes)
#: An object representing the current state of the game
self.game_state = game_state
@property
def team(self):
"""The :class:`~.Team` that scored the goal"""
return self.game_state.find_team(color=self.team_color)
@property
def person_scored(self):
"""The :class:`~.Player` that scored the goal"""
return self.game_state.find_player(username=self.person_scored_username)
player_scored = person_scored
scored_by = person_scored
@property
def assist_scored(self):
"""The :class:`~.Player` that assisted the goal, if any"""
return self.game_state.find_player(username=self.assist_scored_username)
assisted_by = assist_scored
|
class ContextualizedLastScore(LastScore):
'''Statistics about a goal, in the context of a :class:`~.GameState`
The same as the :class:`LastScore` class, but with additional convenience methods
and properties enabled by the context provided by a :class:`~.GameState`
object.
:param game_state:
An object representing the current state of the game
:param superclass_attributes:
Passed to the init method of :class:`LastScore`
'''
def __init__(self, game_state: 'echovr_api.game_state.GameState',
**superclass_attributes):
pass
@property
def team(self):
'''The :class:`~.Team` that scored the goal'''
pass
@property
def person_scored(self):
'''The :class:`~.Player` that scored the goal'''
pass
@property
def assist_scored(self):
'''The :class:`~.Player` that assisted the goal, if any'''
pass
| 8 | 4 | 4 | 0 | 3 | 1 | 1 | 0.76 | 1 | 1 | 0 | 0 | 4 | 1 | 4 | 5 | 37 | 7 | 17 | 13 | 8 | 13 | 13 | 9 | 8 | 1 | 1 | 0 | 4 |
3,611 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/stats.py
|
echovr_api.stats.Stats
|
class Stats():
"""Statistics about a player or team within the current game
:param possession_time:
Time in seconds that the subject posessed the disk.
:param points:
Points scored by the subject.
:param assists:
Number of goals assisted by the subject.
:param saves:
Number of opposing team goals prevented by the subject.
:param stuns:
Number of times the subject has stunned the opposing team.
:param goals:
Number of goals scored by the subject.
TODO: API always returns zero for teams?
:param passes:
Number of times the subject successfully completed a pass
TODO: API always returns zero for teams?
:param catches:
Number of times the subject succssfully caught a pass by a team member
:param steals:
Number of times the subject stole the disk from the opposing team
:param blocks:
Number of times the subject blocked a punch
TODO: API always returns zero for teams?
:param interceptions:
Number of times the subject intercepted a pass by the opposing team
TODO: API always returns zero for teams?
:param shots_taken:
Number of times the subject attempted a shot on goal
"""
def __init__(self, possession_time: float = 0.0, points: int = 0,
assists: int = 0, saves: int = 0, stuns: int = 0,
goals: int = 0, passes: int = 0, catches: int = 0,
steals: int = 0, blocks: int = 0, interceptions: int = 0,
shots_taken: int = 0):
self.possession_time = possession_time
self.points = points
self.assists = assists
self.saves = saves
self.stuns = stuns
self.goals = goals
self.passes = passes
self.catches = catches
self.steals = steals
self.blocks = blocks
self.interceptions = interceptions
self.shots_taken = shots_taken
|
class Stats():
'''Statistics about a player or team within the current game
:param possession_time:
Time in seconds that the subject posessed the disk.
:param points:
Points scored by the subject.
:param assists:
Number of goals assisted by the subject.
:param saves:
Number of opposing team goals prevented by the subject.
:param stuns:
Number of times the subject has stunned the opposing team.
:param goals:
Number of goals scored by the subject.
TODO: API always returns zero for teams?
:param passes:
Number of times the subject successfully completed a pass
TODO: API always returns zero for teams?
:param catches:
Number of times the subject succssfully caught a pass by a team member
:param steals:
Number of times the subject stole the disk from the opposing team
:param blocks:
Number of times the subject blocked a punch
TODO: API always returns zero for teams?
:param interceptions:
Number of times the subject intercepted a pass by the opposing team
TODO: API always returns zero for teams?
:param shots_taken:
Number of times the subject attempted a shot on goal
'''
def __init__(self, possession_time: float = 0.0, points: int = 0,
assists: int = 0, saves: int = 0, stuns: int = 0,
goals: int = 0, passes: int = 0, catches: int = 0,
steals: int = 0, blocks: int = 0, interceptions: int = 0,
shots_taken: int = 0):
pass
| 2 | 1 | 17 | 0 | 17 | 0 | 1 | 1.67 | 0 | 2 | 0 | 0 | 1 | 12 | 1 | 1 | 49 | 1 | 18 | 18 | 12 | 30 | 14 | 14 | 12 | 1 | 0 | 0 | 1 |
3,612 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/player.py
|
echovr_api.player.Player
|
class Player():
"""Represents the state of a single player in the current game
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#teamsplayers
:param name:
The username of the player.
:param playerid:
A number representing ID of the player within the current game session.
:param userid:
A unique number identifying the player across all game sessions.
:param level:
A number (1-50) representing the player's experience "level".
:param number:
The number a player chose for themselves in the customization room.
:param possession:
Indicates whether this player currently has posession of the disk.
:param stunned:
Whether the player is currently stunned.
:param blocking:
Whether the player is currently blocking.
:param invulnerable:
Whether or not the player is currently immune to stuns.
:param position:
The current `position`_ of the player within the arena
:param velocity:
The current `velocity`_ (speed and direction of movement) of the player.
:param lhand:
The `position`_ of the player's left hand within the Arena.
:param rhand:
The `position`_ of the player's right hand within the Arena.
:param forward:
The `direction`_ that the player's head is facing.
:param left:
The `direction`_ that the left side of the player's head is facing.
:param up:
The `direction`_ that the top side of the player's head is facing.
:param stats:
A dict containing data used to instantiate the player's current stats.
.. _position:
.. _direction:
.. _velocity: https://github.com/Ajedi32/echovr_api_docs#vectors
"""
def __init__(self, name: str = "", playerid: int = None, userid: int = None,
level: int = 0, number: int = 0,
possession: bool = False, stunned: bool = False,
blocking: bool = False, invulnerable: bool = False,
position: List[float] = None,
velocity: List[float] = None, lhand: List[float] = None,
rhand: List[float] = None, forward: List[float] = None,
left: List[float] = None, up: List[float] = None,
stats: dict = {}):
#: The username of the player.
self.name = name
#: A integer representing ID of the player within the current game
#: session.
self.playerid = playerid
#: A unique integer identifying the player across all game sessions.
self.userid = userid
#: A integer (1-50) representing the player's experience "level".
self.level = level
#: The number a player chose for themselves in the customization room.
self.number = number
#: Whether this player currently has posession of the disk.
self.possession = possession
#: Whether the player is currently stunned.
self.stunned = stunned
#: Whether the player is currently blocking.
self.blocking = blocking
#: Whether or not the player is currently immune to stuns.
self.invulnerable = invulnerable
#: A :class:`~.Vector3D` represnting the position of the player's head
self.position = Vector3D(*position)
#: A :class:`~.Vector3D` representing the current speed and direction of
#: movement of the player.
self.velocity = Vector3D(*velocity)
#: A :class:`~.Vector3D` represnting the position of the player's left
#: hand
self.lhand = Vector3D(*lhand)
#: A :class:`~.Vector3D` represnting the position of the player's right
#: hand
self.rhand = Vector3D(*rhand)
#: A :class:`~.Vector3D` represnting the direction that the player's
#: head is facing.
self.forward = Vector3D(*forward)
#: A :class:`~.Vector3D` represnting the direction that the left side of
#: the player's head is facing.
self.left = Vector3D(*left)
#: A :class:`~.Vector3D` represnting the direction that the top of the
#: player's head is facing.
self.up = Vector3D(*up)
#: The :class:`~.Stats` object for this player
self.stats = Stats(**stats)
@property
def username(self):
"""The username of the player."""
return self.name
|
class Player():
'''Represents the state of a single player in the current game
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#teamsplayers
:param name:
The username of the player.
:param playerid:
A number representing ID of the player within the current game session.
:param userid:
A unique number identifying the player across all game sessions.
:param level:
A number (1-50) representing the player's experience "level".
:param number:
The number a player chose for themselves in the customization room.
:param possession:
Indicates whether this player currently has posession of the disk.
:param stunned:
Whether the player is currently stunned.
:param blocking:
Whether the player is currently blocking.
:param invulnerable:
Whether or not the player is currently immune to stuns.
:param position:
The current `position`_ of the player within the arena
:param velocity:
The current `velocity`_ (speed and direction of movement) of the player.
:param lhand:
The `position`_ of the player's left hand within the Arena.
:param rhand:
The `position`_ of the player's right hand within the Arena.
:param forward:
The `direction`_ that the player's head is facing.
:param left:
The `direction`_ that the left side of the player's head is facing.
:param up:
The `direction`_ that the top side of the player's head is facing.
:param stats:
A dict containing data used to instantiate the player's current stats.
.. _position:
.. _direction:
.. _velocity: https://github.com/Ajedi32/echovr_api_docs#vectors
'''
def __init__(self, name: str = "", playerid: int = None, userid: int = None,
level: int = 0, number: int = 0,
possession: bool = False, stunned: bool = False,
blocking: bool = False, invulnerable: bool = False,
position: List[float] = None,
velocity: List[float] = None, lhand: List[float] = None,
rhand: List[float] = None, forward: List[float] = None,
left: List[float] = None, up: List[float] = None,
stats: dict = {}):
pass
@property
def username(self):
'''The username of the player.'''
pass
| 4 | 2 | 35 | 9 | 14 | 13 | 1 | 2.27 | 0 | 6 | 1 | 0 | 2 | 17 | 2 | 2 | 120 | 22 | 30 | 29 | 18 | 68 | 21 | 20 | 18 | 1 | 0 | 0 | 2 |
3,613 |
Ajedi32/echovr-api
|
Ajedi32_echovr-api/echovr_api/last_score.py
|
echovr_api.last_score.LastScore
|
class LastScore():
"""Statistics about the most recent goal scored in the current game.
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#last_score
:param disc_speed: The speed of the disk when it entered the goal
:param team: "blue" or "orange" depending on which team scored
:param goal_type: A human-readable explanation of the type of goal scored
:param point_amount: The number of points scored (2 or 3)
:param distance_thrown: The distance the goal was scored from
:param person_scored: Username of the player who scored the goal
:param assist_scored: Username of the player who assisted the goal, if any
"""
def __init__(self, disc_speed: float = 0.0, team: str = "blue",
goal_type: str = "[NO GOAL]", point_amount: int = 0,
distance_thrown: float = 0.0,
person_scored: str = "[INVALID]",
assist_scored: str = "[INVALID]"):
#: The speed of the disk when it entered the goal, in meters/second
self.disc_speed = disc_speed
#: The :class:`~.Team.Color` of the team that scored
self.team_color = Team.Color.by_name(team)
#: A human-readable explanation of the type of goal scored
self.goal_type = goal_type
#: The number of points scored (2 or 3)
self.point_amount = point_amount
#: The distance the goal was scored from
self.distance_thrown = distance_thrown
#: The username of the player who scored the goal
self.person_scored_username = person_scored
#: The username of the player who assisted the goal, if any
self.assist_scored_username = assist_scored
|
class LastScore():
'''Statistics about the most recent goal scored in the current game.
Initialized using data directly from the Echo VR API. See `the Echo VR API
documentation`__ for further details on the attributes associated with this
class, and the expected intialization parameters.
__ https://github.com/Ajedi32/echovr_api_docs#last_score
:param disc_speed: The speed of the disk when it entered the goal
:param team: "blue" or "orange" depending on which team scored
:param goal_type: A human-readable explanation of the type of goal scored
:param point_amount: The number of points scored (2 or 3)
:param distance_thrown: The distance the goal was scored from
:param person_scored: Username of the player who scored the goal
:param assist_scored: Username of the player who assisted the goal, if any
'''
def __init__(self, disc_speed: float = 0.0, team: str = "blue",
goal_type: str = "[NO GOAL]", point_amount: int = 0,
distance_thrown: float = 0.0,
person_scored: str = "[INVALID]",
assist_scored: str = "[INVALID]"):
pass
| 2 | 1 | 26 | 7 | 12 | 7 | 1 | 1.54 | 0 | 5 | 2 | 1 | 1 | 7 | 1 | 1 | 44 | 11 | 13 | 13 | 7 | 20 | 9 | 9 | 7 | 1 | 0 | 0 | 1 |
3,614 |
Akay7/nosql2django
|
Akay7_nosql2django/nosql2django/tests/models.py
|
nosql2django.tests.models.User
|
class User(models.Model):
nick_name = models.CharField(max_length=256)
def __str__(self):
return self.nick_name
|
class User(models.Model):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
3,615 |
Akay7/nosql2django
|
Akay7_nosql2django/nosql2django/tests/models.py
|
nosql2django.tests.models.Tag
|
class Tag(models.Model):
title = models.CharField(max_length=256)
def __str__(self):
return self.title
|
class Tag(models.Model):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 1 | 0 | 1 |
3,616 |
Akay7/nosql2django
|
Akay7_nosql2django/nosql2django/tests/models.py
|
nosql2django.tests.models.Post
|
class Post(models.Model):
title = models.CharField(max_length=256)
summary = models.TextField()
updated = models.DateTimeField()
author = models.ForeignKey(User, related_name='posts')
tags = models.ManyToManyField(Tag, related_name='posts')
def __str__(self):
return self.title
|
class Post(models.Model):
def __str__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 9 | 1 | 8 | 7 | 6 | 0 | 8 | 7 | 6 | 1 | 1 | 0 | 1 |
3,617 |
Akay7/nosql2django
|
Akay7_nosql2django/nosql2django/parser_mapper.py
|
nosql2django.parser_mapper.ParserMapper
|
class ParserMapper:
def __init__(self, source, mapping):
self.source = source
self.mapping = mapping
@staticmethod
def save_to_db(model_text_id, parsed_values):
"""save to db and return saved object"""
Model = apps.get_model(model_text_id)
# normalise values and separate to m2m, simple
simple_fields = {}
many2many_fields = {}
for field, value in parsed_values.items():
if (Model._meta.get_field(
field).get_internal_type() == 'ManyToManyField'):
many2many_fields[field] = value
elif (Model._meta.get_field(
field).get_internal_type() == 'DateTimeField'):
simple_fields[field] = time_parser.parse(value)
else:
simple_fields[field] = value
# ToDo: add unique identify parameter to field
# ToDo: allow unique identify m2m field
model, created = Model.objects.get_or_create(**simple_fields)
for field, value in many2many_fields.items():
setattr(model, field, value)
model.save()
return model
@staticmethod
def parse_obj(mapping, obj):
def _parse_single_obj(mapping, obj):
parsed_values = {}
for field in mapping.fields:
if isinstance(field.mapping, ObjectMapping):
value = ParserMapper.parse_obj(
field.mapping, obj[field.mapping.base_path] if field.mapping.base_path else obj
)
else:
value = deep_get(obj, field.mapping)
parsed_values[field.name] = value
return ParserMapper.save_to_db(mapping.model, parsed_values)
if isinstance(obj, list):
return [_parse_single_obj(mapping, i) for i in obj]
return _parse_single_obj(mapping, obj)
def put_to_models(self):
feed = feedparser.parse(self.source)
for e in feed['entries']:
ParserMapper.parse_obj(
self.mapping,
e[self.mapping.base_path] if self.mapping.base_path else e
)
|
class ParserMapper:
def __init__(self, source, mapping):
pass
@staticmethod
def save_to_db(model_text_id, parsed_values):
'''save to db and return saved object'''
pass
@staticmethod
def parse_obj(mapping, obj):
pass
def _parse_single_obj(mapping, obj):
pass
def put_to_models(self):
pass
| 8 | 1 | 13 | 2 | 11 | 1 | 3 | 0.09 | 0 | 1 | 0 | 0 | 2 | 2 | 4 | 4 | 61 | 11 | 46 | 20 | 38 | 4 | 34 | 18 | 28 | 5 | 0 | 2 | 15 |
3,618 |
Akay7/nosql2django
|
Akay7_nosql2django/demo/demo/migrations/0001_initial.py
|
demo.migrations.0001_initial.Migration
|
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
('summary', models.TextField()),
('updated', models.DateTimeField()),
],
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=256)),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nick_name', models.CharField(max_length=256)),
],
),
migrations.AddField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts', to='demo.User'),
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(related_name='posts', to='demo.Tag'),
),
]
|
class Migration(migrations.Migration):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 3 | 39 | 4 | 38 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
3,619 |
Akay7/nosql2django
|
Akay7_nosql2django/demo/demo/admin.py
|
demo.admin.UserAdmin
|
class UserAdmin(admin.ModelAdmin):
pass
|
class UserAdmin(admin.ModelAdmin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
3,620 |
Akay7/nosql2django
|
Akay7_nosql2django/demo/demo/management/commands/habr.py
|
habr.Command
|
class Command(BaseCommand):
help = 'Parse habr feeds'
def handle(self, *args, **options):
source = 'https://habrahabr.ru/rss/hubs/all/'
mapping = ObjectMapping(
None, 'demo.Post',
(
FieldMapping('title', 'title'),
FieldMapping('summary', 'description'),
FieldMapping('tags',
ObjectMapping(
'tags', 'demo.Tag',
(FieldMapping('title', 'term'),)
)
),
FieldMapping('author',
ObjectMapping(
None, 'demo.User',
(FieldMapping('nick_name', 'author'),)
)
),
FieldMapping('updated', 'published')
)
)
parser_mapper = ParserMapper(source, mapping)
parser_mapper.put_to_models()
self.stdout.write(
self.style.SUCCESS("successfully parsed all habr feeds"))
|
class Command(BaseCommand):
def handle(self, *args, **options):
pass
| 2 | 0 | 28 | 2 | 26 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 31 | 3 | 28 | 6 | 26 | 0 | 8 | 6 | 6 | 1 | 1 | 0 | 1 |
3,621 |
Akay7/nosql2django
|
Akay7_nosql2django/demo/demo/admin.py
|
demo.admin.PostAdmin
|
class PostAdmin(admin.ModelAdmin):
pass
|
class PostAdmin(admin.ModelAdmin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
3,622 |
Akay7/nosql2django
|
Akay7_nosql2django/nosql2django/tests/tests.py
|
nosql2django.tests.tests.TestParser
|
class TestParser(TestCase):
def test_can_get_model_without_any_nested_models(self):
self.assertEqual(User.objects.count(), 0)
mapping = ObjectMapping(
None, 'tests.User',
(FieldMapping('nick_name', 'author'),)
)
source = os.path.join(TESTS_DIR, 'habr_source.xml')
parser_mapper = ParserMapper(source, mapping)
parser_mapper.put_to_models()
self.assertEqual(User.objects.count(), 20)
# after restart not add duplicates to db
parser_mapper.put_to_models()
self.assertEqual(User.objects.count(), 20)
def test_can_get_many_nested_models(self):
self.assertEqual(Tag.objects.count(), 0)
mapping = ObjectMapping(
'tags', 'tests.Tag',
(FieldMapping('title', 'term'),)
)
source = os.path.join(TESTS_DIR, 'habr_source.xml')
parser_mapper = ParserMapper(source, mapping)
parser_mapper.put_to_models()
self.assertEqual(Tag.objects.count(), 129)
self.assertTrue(Tag.objects.filter(title="positive technologies").exists())
def test_can_get_model_with_nested_models(self):
self.assertEqual(Tag.objects.count(), 0)
self.assertEqual(User.objects.count(), 0)
self.assertEqual(Post.objects.count(), 0)
mapping = ObjectMapping(
None, 'tests.Post',
(
FieldMapping('title', 'title'),
FieldMapping('summary', 'description'),
FieldMapping('tags',
ObjectMapping(
'tags', 'tests.Tag',
(FieldMapping('title', 'term'),)
)
),
FieldMapping('author',
ObjectMapping(
None, 'tests.User',
(FieldMapping('nick_name', 'author'),)
)
),
FieldMapping('updated', 'published')
)
)
source = os.path.join(TESTS_DIR, 'habr_source.xml')
parser_mapper = ParserMapper(source, mapping)
parser_mapper.put_to_models()
# verify result
self.assertEqual(Tag.objects.count(), 129)
self.assertTrue(Tag.objects.filter(title="positive technologies").exists())
self.assertEqual(User.objects.count(), 20)
self.assertEqual(Post.objects.count(), 20)
self.assertNotEqual(Post.objects.first().title, '')
self.assertNotEqual(Post.objects.first().author, None)
tags_in_post_qty = len(
Post.objects.values_list('tags__title', flat=True).distinct()
)
self.assertEqual(tags_in_post_qty, 129)
def test_correct_parse_reddit(self):
mapping = ObjectMapping(
None, 'tests.Post',
(
FieldMapping('title', 'title'),
FieldMapping('author',
ObjectMapping(
None, 'tests.User',
(FieldMapping('nick_name', 'author'),)
)
),
FieldMapping('updated', 'updated')
)
)
source = os.path.join(TESTS_DIR, 'reddit_source.xml')
parser_mapper = ParserMapper(source, mapping)
parser_mapper.put_to_models()
self.assertEqual(Post.objects.count(), 25)
self.assertEqual(User.objects.count(), 21)
self.assertEqual(Tag.objects.count(), 0)
|
class TestParser(TestCase):
def test_can_get_model_without_any_nested_models(self):
pass
def test_can_get_many_nested_models(self):
pass
def test_can_get_model_with_nested_models(self):
pass
def test_correct_parse_reddit(self):
pass
| 5 | 0 | 23 | 3 | 20 | 1 | 1 | 0.02 | 1 | 4 | 4 | 0 | 4 | 0 | 4 | 4 | 97 | 14 | 81 | 18 | 76 | 2 | 42 | 18 | 37 | 1 | 1 | 0 | 4 |
3,623 |
Akay7/nosql2django
|
Akay7_nosql2django/demo/demo/admin.py
|
demo.admin.TagAdmin
|
class TagAdmin(admin.ModelAdmin):
pass
|
class TagAdmin(admin.ModelAdmin):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 1 | 1 | 0 | 2 | 1 | 1 | 0 | 1 | 0 | 0 |
3,624 |
Akay7/nosql2django
|
Akay7_nosql2django/demo/demo/management/commands/reddit.py
|
reddit.Command
|
class Command(BaseCommand):
help = 'Parse reddit feeds'
def handle(self, *args, **options):
source = 'https://www.reddit.com/r/news/.rss'
mapping = ObjectMapping(
None, 'demo.Post',
(
FieldMapping('title', 'title'),
FieldMapping('author',
ObjectMapping(
None, 'demo.User',
(FieldMapping('nick_name', 'author'),)
)
),
FieldMapping('updated', 'updated')
)
)
parser_mapper = ParserMapper(source, mapping)
parser_mapper.put_to_models()
self.stdout.write(
self.style.SUCCESS("successfully parsed all reddit feeds"))
|
class Command(BaseCommand):
def handle(self, *args, **options):
pass
| 2 | 0 | 22 | 3 | 19 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 1 | 25 | 4 | 21 | 6 | 19 | 0 | 8 | 6 | 6 | 1 | 1 | 0 | 1 |
3,625 |
Akhail/Tebless
|
Akhail_Tebless/tebless/devs/_debug.py
|
tebless.devs._debug.Debug
|
class Debug(object):
is_active = False
def __init__(self, obj):
self._obj = obj
@staticmethod
def log(label, content=None):
if content:
msg = pformat(content, 2, 80, compact=True)
debug(label + ":\n" + indent(msg, ' '))
def __enter__(self):
debug(f" < {self._obj} > ".center(80, "="))
return self
def __exit__(self, *args, **kwargs):
debug(f" </ {self._obj} > ".center(80, "=") + '\n')
|
class Debug(object):
def __init__(self, obj):
pass
@staticmethod
def log(label, content=None):
pass
def __enter__(self):
pass
def __exit__(self, *args, **kwargs):
pass
| 6 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 3 | 1 | 4 | 4 | 18 | 4 | 14 | 9 | 8 | 0 | 13 | 8 | 8 | 2 | 1 | 1 | 5 |
3,626 |
Akhail/Tebless
|
Akhail_Tebless/tebless/utils/__init__.py
|
tebless.utils.Store
|
class Store(MutableMapping, OrderedDict):
def __init__(self, *args, **kwargs):
self._map = OrderedDict()
self._dynamic = True
if kwargs:
if '_dynamic' in kwargs:
self._dynamic = kwargs['_dynamic']
if args:
d = args[0]
if isinstance(d, dict):
for k, v in self.__call_items(d):
if isinstance(v, dict):
v = Store(v, _dynamic=self._dynamic)
if type(v) is list:
ele = []
for i in v:
n = i
if type(i) is dict:
n = Store(i, _dynamic=self._dynamic)
ele.append(n)
v = ele
self._map[k] = v
if kwargs:
for k, v in self.__call_items(kwargs):
if k is not '_dynamic':
self._map[k] = v
def __call_items(self, obj):
if hasattr(obj, 'iteritems') and ismethod(getattr(obj, 'iteritems')):
return obj.iteritems()
else:
return obj.items()
def items(self):
return self.__call_items(self._map)
def __iter__(self):
return self._map.__iter__()
def next(self):
return self._map.next()
def __setitem__(self, k, v):
self._map[k] = v
def __getitem__(self, k):
if k not in self._map and \
self._dynamic and k != '_ipython_canary_method_should_not_exist_':
# automatically extend to new Store
self[k] = Store()
return self._map[k]
def __setattr__(self, k, v):
if k in {'_map', '_dynamic',
'_ipython_canary_method_should_not_exist_'}:
super(Store, self).__setattr__(k, v)
else:
self[k] = v
def __getattr__(self, k):
if k == {'_map', '_dynamic',
'_ipython_canary_method_should_not_exist_'}:
super(Store, self).__getattr__(k)
else:
return self[k]
def __delattr__(self, key):
return self._map.__delitem__(key)
def __contains__(self, k):
return self._map.__contains__(k)
def __str__(self):
items = []
for k, v in self.__call_items(self._map):
if id(v) == id(self):
items.append('{0}=Store(...)'.format(k))
else:
items.append('{0}={1}'.format(k, repr(v)))
joined = ', '.join(items)
out = '{0}({1})'.format(self.__class__.__name__, joined)
return out
def __repr__(self):
return str(self)
def empty(self):
return (not any(self))
# proper dict subclassing
def values(self):
return self._map.values()
# ipython support
def __dir__(self):
return self.keys()
@classmethod
def parseOther(self, other):
if type(other) is Store:
return other._map
else:
return other
def __cmp__(self, other):
other = Store.parseOther(other)
return self._map.__cmp__(other)
def __eq__(self, other):
other = Store.parseOther(other)
if not isinstance(other, dict):
return False
return self._map.__eq__(other)
def __ge__(self, other):
other = Store.parseOther(other)
return self._map.__ge__(other)
def __gt__(self, other):
other = Store.parseOther(other)
return self._map.__gt__(other)
def __le__(self, other):
other = Store.parseOther(other)
return self._map.__le__(other)
def __lt__(self, other):
other = Store.parseOther(other)
return self._map.__lt__(other)
def __ne__(self, other):
other = Store.parseOther(other)
return self._map.__ne__(other)
def __delitem__(self, key):
return self._map.__delitem__(key)
def __len__(self):
return self._map.__len__()
def clear(self):
self._map.clear()
def copy(self):
return Store(self)
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo=None):
return self.copy()
def get(self, key, default=None):
return self._map.get(key, default)
def keys(self):
return self._map.keys()
def pop(self, key, default=None):
return self._map.pop(key, default)
def popitem(self):
return self._map.popitem()
def setdefault(self, key, default=None):
self._map.setdefault(key, default)
def update(self, *args, **kwargs):
if len(args) != 0:
self._map.update(*args)
self._map.update(kwargs)
@classmethod
def fromkeys(cls, seq, value=None):
d = Store()
d._map = OrderedDict.fromkeys(seq, value)
return d
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
|
class Store(MutableMapping, OrderedDict):
def __init__(self, *args, **kwargs):
pass
def __call_items(self, obj):
pass
def items(self):
pass
def __iter__(self):
pass
def next(self):
pass
def __setitem__(self, k, v):
pass
def __getitem__(self, k):
pass
def __setattr__(self, k, v):
pass
def __getattr__(self, k):
pass
def __delattr__(self, key):
pass
def __contains__(self, k):
pass
def __str__(self):
pass
def __repr__(self):
pass
def empty(self):
pass
def values(self):
pass
def __dir__(self):
pass
@classmethod
def parseOther(self, other):
pass
def __cmp__(self, other):
pass
def __eq__(self, other):
pass
def __ge__(self, other):
pass
def __gt__(self, other):
pass
def __le__(self, other):
pass
def __lt__(self, other):
pass
def __ne__(self, other):
pass
def __delitem__(self, key):
pass
def __len__(self):
pass
def clear(self):
pass
def copy(self):
pass
def __copy__(self):
pass
def __deepcopy__(self, memo=None):
pass
def get(self, key, default=None):
pass
def keys(self):
pass
def pop(self, key, default=None):
pass
def popitem(self):
pass
def setdefault(self, key, default=None):
pass
def update(self, *args, **kwargs):
pass
@classmethod
def fromkeys(cls, seq, value=None):
pass
def __getstate__(self):
pass
def __setstate__(self, d):
pass
| 42 | 0 | 4 | 0 | 4 | 0 | 2 | 0.02 | 2 | 4 | 0 | 0 | 37 | 2 | 39 | 89 | 183 | 38 | 142 | 54 | 100 | 3 | 132 | 52 | 92 | 13 | 3 | 6 | 60 |
3,627 |
Akhail/Tebless
|
Akhail_Tebless/tebless/utils/keyboard.py
|
tebless.utils.keyboard.Keyboard
|
class Keyboard(object):
def __getattr__(self, name):
assert isinstance(name, str)
if name.startswith("KEY_") and name in TERM.__dict__:
return TERM.__dict__.get(name)
raise AttributeError(
"type object 'Keyboard' has no attribute '{}'".format(name))
|
class Keyboard(object):
def __getattr__(self, name):
pass
| 2 | 0 | 7 | 1 | 6 | 0 | 2 | 0 | 1 | 2 | 0 | 0 | 1 | 0 | 1 | 1 | 8 | 1 | 7 | 2 | 5 | 0 | 6 | 2 | 4 | 2 | 1 | 1 | 2 |
3,628 |
Akhail/Tebless
|
Akhail_Tebless/tebless/utils/styles.py
|
tebless.utils.styles.Style
|
class Style(object):
def __getattr__(self, name):
formatters = blessed.formatters.split_compound(name)
compoundables = blessed.formatters.COMPOUNDABLES
colors = blessed.formatters.COLORS
if name in colors or all(fmt in compoundables for fmt in formatters):
return TERM.__getattr__(name)
else:
raise AttributeError(
"type object 'Style' has no attribute '{}'".format(name))
def underline_ns(self, text):
tmp = text.strip(' ')
return text.replace(tmp, Style().underline(tmp))
|
class Style(object):
def __getattr__(self, name):
pass
def underline_ns(self, text):
pass
| 3 | 0 | 6 | 0 | 6 | 0 | 2 | 0 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 2 | 14 | 1 | 13 | 7 | 10 | 0 | 11 | 7 | 8 | 2 | 1 | 1 | 3 |
3,629 |
Akhail/Tebless
|
Akhail_Tebless/tebless/widgets/checkbox.py
|
tebless.widgets.checkbox.CheckBox
|
class CheckBox(Widget):
def __init__(self,
label='CheckBox',
key=KEY_F1,
state=False,
render=None,
check=None,
*args, **kwargs):
super().__init__(on_key=self._on_key, *args, **kwargs)
self._label = label
self._state = state
self._render = render or '{check} {label}'
self._check = check or (lambda _state: '[ ]' if _state else '[X]')
self._key = key
def paint(self):
echo(self.term.move(self.y, self.x))
echo(self._render.format(check=self._check(self._state),
label=self._label))
def _on_key(self, key):
if key.code == self._key:
self.value = not self.value
@property
def label(self):
return self._label
@label.setter
def label(self, value):
self._label = value
self.on_change() # pylint: disable=E1101
@property
def value(self):
return self._state
@value.setter
def value(self, value):
if not isinstance(value, bool):
raise TypeError('Only supported boolean')
self._state = value
self.on_change()
|
class CheckBox(Widget):
def __init__(self,
label='CheckBox',
key=KEY_F1,
state=False,
render=None,
check=None,
*args, **kwargs):
pass
def paint(self):
pass
def _on_key(self, key):
pass
@property
def label(self):
pass
@label.setter
def label(self):
pass
@property
def value(self):
pass
@value.setter
def value(self):
pass
| 12 | 0 | 5 | 0 | 5 | 0 | 1 | 0.05 | 1 | 3 | 0 | 0 | 7 | 5 | 7 | 23 | 43 | 6 | 37 | 23 | 19 | 2 | 26 | 13 | 18 | 2 | 2 | 1 | 9 |
3,630 |
Akhail/Tebless
|
Akhail_Tebless/tebless/widgets/filter_menu.py
|
tebless.widgets.filter_menu.FilterMenu
|
class FilterMenu(Widget):
"""Widget with label and menu.
Create a new widget :class:`FilterMenu`
:param s_input: :class:`Input` options.
:param s_menu: :class:`Menu` options.
:param filter_items: function receive text, items filter and return result.
:type s_input: Input
:type s_menu: Menu
:type filter_items: function
>>> from tebless.widgets import Label, Window, Input, FilterMenu
... @Window.decorator
... def view(window):
... window += FilterMenu({
... 'label': "Search: "
... },{
... 'items': [str(x) for x in range(100)]
... })
... view()
"""
def __init__(self,
s_input,
s_menu,
filter_items=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
self._text = ''
events = Events()
self.on_select = events.on_select
_s_menu = {
'on_enter': self.on_select,
'cordy': 1
}
_s_input = {
'on_change': self._on_change_input
}
_s_menu.update(s_menu)
_s_input.update(s_input)
self._filter = filter_items
self._input = Input(**_s_input)
self._menu = Menu(**_s_menu)
self._items = list(self._menu.items)
self.on_key += self._input.on_key
self.on_key_arrow += self._menu.on_key_arrow
self.on_enter += self._menu.on_enter
def _on_change_input(self, *_):
text = self._input.value.lower()
def filt(text, items):
return filter(lambda item: text.lower() in item.lower(), items)
_filter = self._filter or filt
item_filter = _filter(text, self._items.copy())
self._menu.items = item_filter
def paint(self):
self._menu.paint()
self._input.paint()
def destroy(self):
self._input.destroy()
self._menu.destroy()
|
class FilterMenu(Widget):
'''Widget with label and menu.
Create a new widget :class:`FilterMenu`
:param s_input: :class:`Input` options.
:param s_menu: :class:`Menu` options.
:param filter_items: function receive text, items filter and return result.
:type s_input: Input
:type s_menu: Menu
:type filter_items: function
>>> from tebless.widgets import Label, Window, Input, FilterMenu
... @Window.decorator
... def view(window):
... window += FilterMenu({
... 'label': "Search: "
... },{
... 'items': [str(x) for x in range(100)]
... })
... view()
'''
def __init__(self,
s_input,
s_menu,
filter_items=None,
*args, **kwargs):
pass
def _on_change_input(self, *_):
pass
def filt(text, items):
pass
def paint(self):
pass
def destroy(self):
pass
| 6 | 1 | 9 | 1 | 8 | 0 | 1 | 0.46 | 1 | 3 | 0 | 0 | 4 | 6 | 4 | 20 | 70 | 13 | 39 | 22 | 29 | 18 | 30 | 18 | 24 | 1 | 2 | 0 | 5 |
3,631 |
Akhail/Tebless
|
Akhail_Tebless/tebless/widgets/menu.py
|
tebless.widgets.menu.Menu
|
class Menu(Widget):
"""Widget show a list of elements.
:param cordx: Position on axis X
:param cordy: Position on axis Y
:param items: Element to show
:param is_menu: Is a menu or only show items
:param limit: Max items to show
:param header: Text header of table
:param footer: Text footer of table
:param selector: A function that return text to show on select
:param width: Width of table
:param empty: Whats show if table is empty
:param key: A function return text of object in list
"""
def __init__(self, items=None, *args, **kwargs):
super().__init__(items=items,
on_key_arrow=self._on_key_arrow,
*args, **kwargs)
self._items = items or []
self._len_items = len(self._items)
self._empty = kwargs.get('empty', ['Sin elementos'])
self._is_menu = kwargs.get('is_menu', True)
self._limit = round(kwargs.get('limit', 4))
if 'width' not in kwargs:
self._width = self.term.width
self._header = kwargs.get('header', '')
self._footer = kwargs.get('footer', '')
def selector(text, **kwargs):
return red('| ') + text if self.term.length(text) > 0 else text
self._selector = kwargs.get('selector', selector)
self._key = kwargs.get('key', lambda x: x)
self._formater = kwargs.get(
'formater', lambda text, **kw: ' ' + text[:self._width])
self._page = 1
self._index = 0
self._height = 0
def _on_key_arrow(self, key):
if key.code == KEY_DOWN:
self.index = (self.index + 1) % self._len_items
elif key.code == KEY_UP:
self.index = (self.index - 1) % self._len_items
def paint(self):
self._page = ceil((self._index + 1) / self._limit)
echo(self.term.move(self.y, self.x))
header_height, footer_height = 0, 0
if self._header != '':
header_height = len(self._header.split('\n'))
if self._footer != '':
footer_height = len(self._footer.split('\n'))
items = self.items if self.items else self._empty
first = floor(self._index / self._limit) * self._limit
max_page = ceil(len(items) / self._limit)
items = items[first:self._limit + first]
vars_op = {
'page': self._page,
'last': max_page,
'count': self._len_items
}
# Print header
if self._header != '':
echo(self._header.format(**vars_op) + '\n')
self._height = header_height
# Print elements
for idx, item in enumerate(items):
array_text = self._key(item)
if isinstance(array_text, str):
array_text = [array_text]
for index, text in enumerate(array_text):
echo(self.term.move_x(self.x))
tmp = self._formater(**{
'text': text,
'index': index,
'lenght': len(array_text)
})
pos = self._index % self._limit == idx
if pos and self._is_menu and text != '':
tmp = self._selector(**{
'text': text[:self.width],
'index': pos,
'lenght': len(array_text)
})
tmp += '\n'
self._height += tmp.count('\n')
echo(tmp)
# Print footer
if self._footer != '':
echo(self.term.move_x(self.x))
echo(self._footer.format(**vars_op))
self._height += footer_height
@property
def value(self):
return self.items[self._index]
@property
def index(self):
return self._index
@index.setter
def index(self, value):
self._index = value
self.on_change()
@property
def items(self):
return list(self._items)
@items.setter
def items(self, value):
self._index = 0
self._items = list(value)
self._len_items = len(self._items)
self.on_change()
|
class Menu(Widget):
'''Widget show a list of elements.
:param cordx: Position on axis X
:param cordy: Position on axis Y
:param items: Element to show
:param is_menu: Is a menu or only show items
:param limit: Max items to show
:param header: Text header of table
:param footer: Text footer of table
:param selector: A function that return text to show on select
:param width: Width of table
:param empty: Whats show if table is empty
:param key: A function return text of object in list
'''
def __init__(self, items=None, *args, **kwargs):
pass
def selector(text, **kwargs):
pass
def _on_key_arrow(self, key):
pass
def paint(self):
pass
@property
def value(self):
pass
@property
def index(self):
pass
@index.setter
def index(self):
pass
@property
def items(self):
pass
@items.setter
def items(self):
pass
| 15 | 1 | 12 | 1 | 10 | 0 | 2 | 0.17 | 1 | 4 | 0 | 0 | 8 | 14 | 8 | 24 | 132 | 24 | 92 | 39 | 77 | 16 | 71 | 34 | 61 | 10 | 2 | 3 | 22 |
3,632 |
Akhail/Tebless
|
Akhail_Tebless/tebless/widgets/label.py
|
tebless.widgets.label.Label
|
class Label(Widget):
"""Print text in window.
Create a new widget :class:`Label`
:param text: What to print
:param align: Text alignment ('cente', 'left', 'right')
:param width: Maximum width of label is ignored if wrap is False
:param height: Maximum height of label is ignored if wrap is False
:param wrap: The text should be limited
:type text: str
:type align: str
:type width: int, float
:type height: int, float
:type wrap: bool
:Example:
>>> from tebless.widgets import Label, Window
>>> @Window.decorator(main=True)
... def view(window):
... window += Label(text="Hello world!", cordx=2,
... cordy=2, width=10, height=2, align='center')
"""
def __init__(self,
text='Label',
align='left',
width=20,
height=1,
*args, **kwargs):
params = dict(text=text, align=align,
width=width or 20, height=height or 0)
super().__init__(*args, **params, **kwargs)
self._text = text
self._prev = ''
self._wrap = (lambda x, **kw: [x]) if width is None else self.term.wrap
if align == 'right':
self._align = self.term.rjust
elif align == 'center':
self._align = self.term.center
elif align == 'left':
self._align = self.term.ljust
else:
raise ValueError("Only align center, left, right")
def paint(self):
wrapped = self._wrap(self.value, width=self.width)[:self.height]
wrapped = map(partial(self._align, width=self.width), wrapped)
wrapped = ''.join(
self.term.move(idx + self.y, self.x) + value
for idx, value in enumerate(wrapped)
)
echo(wrapped)
@property
def value(self):
return self._text
@value.setter
def value(self, text):
self._prev = self._text
self._text = text
self.on_change()
def destroy(self):
wrapped_text = self._wrap(self._prev, width=self.width)[:self.height]
lines = ''.join(
self.term.move(idx + self.y, self.x) + ' ' * self.term.length(text)
for idx, text in enumerate(wrapped_text)
)
echo(lines)
@property
def height(self):
if self._height == 0:
return len(self._wrap(self.value, width=self.width))
return self._height
|
class Label(Widget):
'''Print text in window.
Create a new widget :class:`Label`
:param text: What to print
:param align: Text alignment ('cente', 'left', 'right')
:param width: Maximum width of label is ignored if wrap is False
:param height: Maximum height of label is ignored if wrap is False
:param wrap: The text should be limited
:type text: str
:type align: str
:type width: int, float
:type height: int, float
:type wrap: bool
:Example:
>>> from tebless.widgets import Label, Window
>>> @Window.decorator(main=True)
... def view(window):
... window += Label(text="Hello world!", cordx=2,
... cordy=2, width=10, height=2, align='center')
'''
def __init__(self,
text='Label',
align='left',
width=20,
height=1,
*args, **kwargs):
pass
def paint(self):
pass
@property
def value(self):
pass
@value.setter
def value(self):
pass
def destroy(self):
pass
@property
def height(self):
pass
| 10 | 1 | 8 | 0 | 8 | 0 | 2 | 0.39 | 1 | 6 | 0 | 0 | 6 | 4 | 6 | 22 | 80 | 12 | 49 | 24 | 34 | 19 | 31 | 15 | 24 | 5 | 2 | 1 | 11 |
3,633 |
Akhail/Tebless
|
Akhail_Tebless/tebless/widgets/input.py
|
tebless.widgets.input.Input
|
class Input(Widget):
"""Input widget with label.
:param text: placeholder text
:param label: Desc of input
:param align: center, ljust, rjust text
:param fill_c: blank space
:param cursor: pointer
:param left_l: left terminator
:param right_l: right terminator
:param max_len: max string length
:param validation: a regex string to validate input
:param text_style: apply to text
:type text: str
:type label: str
:type align: str
:type fill_c: str
:type cursor: str
:type left_l: str
:type right_l: str
:type max_len: int
:type validation: regex
:type text_style: func
:Example:
>>> from tebless.widgets import Input, Window
>>> @Window.decorator(main=True)
... def view(window):
... window += Input(label="Insert text", cordx=2,
... cordy=2, width=10, align='center')
"""
def __init__(self,
text='',
label='',
align='left',
max_len=6,
*args, **kwargs):
params = dict(text=text, label=label, max_len=round(max_len))
super().__init__(on_key=self._on_key, *args, **params, **kwargs)
self._text = text
self._label = label
self._max_len = round(max_len)
self._fill_c = kwargs.get('fill_c', '_')
self._cursor = kwargs.get('cursor', '_')
self._left_l = kwargs.get('left_l', ' [ ')
self._right_l = kwargs.get('right_l', ' ]')
self._validation = kwargs.get('validation', r'.')
self._text_style = kwargs.get('text_style', lambda x: x)
if align == 'left':
self._align = self.term.ljust
elif align == 'center':
self._align = self.term.center
elif align == 'right':
self._align = self.term.rjust
else:
raise ValueError('Only valids aligns: left, right, center')
if self.term.length(self._text) > self._max_len:
raise ValueError('text is too long')
elif self.term.length(self._fill_c) > 1:
raise ValueError('fill_c need a char')
elif self.term.length(self._cursor) > 1:
raise ValueError('cursor need a char')
def _on_key(self, key):
correct_len = self.term.length(self.value) < self._max_len
validations = re.match(self._validation, key) and key.isprintable()
# TODO: Add event on fail validation
if correct_len and validations:
self.value += key
elif key.code in (KEY_BACKSPACE, KEY_DELETE) and self.value:
self.value = self.value[:-1]
def paint(self):
text = self._text_style(self.value)
if self.term.length(self.value) < self._max_len:
text = text + self._cursor
text = self._align(text, fillchar=self._fill_c, width=self._max_len)
input_field = self._left_l + text + self._right_l # [_______]
echo(self.term.move(self.y, self.x) +
self._label + input_field) # label
@property
def width(self):
len_widget = self.term.length(
self._label) + self.term.length(self._right_l)
len_widget += self.term.length(self._left_l) + self._max_len
return len_widget
@property
def height(self):
return 1
@property
def value(self):
return self._text
@property
def label(self):
return self._label
@label.setter
def label(self, value):
if not isinstance(value, str):
raise TypeError('Only supported string')
self._label = value
self.on_change()
@value.setter
def value(self, value):
if not (isinstance(value, str) or isinstance(value, int)):
raise TypeError('Only supported string or int')
self._text = str(value)
self.on_change()
|
class Input(Widget):
'''Input widget with label.
:param text: placeholder text
:param label: Desc of input
:param align: center, ljust, rjust text
:param fill_c: blank space
:param cursor: pointer
:param left_l: left terminator
:param right_l: right terminator
:param max_len: max string length
:param validation: a regex string to validate input
:param text_style: apply to text
:type text: str
:type label: str
:type align: str
:type fill_c: str
:type cursor: str
:type left_l: str
:type right_l: str
:type max_len: int
:type validation: regex
:type text_style: func
:Example:
>>> from tebless.widgets import Input, Window
>>> @Window.decorator(main=True)
... def view(window):
... window += Input(label="Insert text", cordx=2,
... cordy=2, width=10, align='center')
'''
def __init__(self,
text='',
label='',
align='left',
max_len=6,
*args, **kwargs):
pass
def _on_key(self, key):
pass
def paint(self):
pass
@property
def width(self):
pass
@property
def height(self):
pass
@property
def value(self):
pass
@property
def label(self):
pass
@label.setter
def label(self):
pass
@value.setter
def value(self):
pass
| 16 | 1 | 8 | 1 | 7 | 0 | 2 | 0.42 | 1 | 6 | 0 | 0 | 9 | 10 | 9 | 25 | 123 | 20 | 74 | 37 | 53 | 31 | 55 | 26 | 45 | 7 | 2 | 1 | 20 |
3,634 |
Akhail/Tebless
|
Akhail_Tebless/tebless/widgets/window.py
|
tebless.widgets.window.Window
|
class Window(Widget):
"""Class that encapsulates a whole window and allows to own the elements
inside.
:param store: Global storage is necessary
:param parent: If you do not provider it is the main window
:type store: Store
:type parent: Window
:example:
>>> with Window (store) as window:
... Window += element
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._width, self._height = self.term.width, self.term.height
if not isinstance(self.store, Store):
raise TypeError("Store is invalid")
def worker(func, time):
sleep(time)
func()
def timeout(func, *args, **kwargs):
timeout = kwargs.pop('time', None)
if timeout is None:
raise TypeError("take two arguments func and time")
thread = Thread(target=worker, args=(
partial(func, *args, **kwargs), timeout))
thread.start()
self.timeout = timeout
self._is_active = False
self._listen = True
self._widgets = []
events = Events()
self.on_enter = events.on_enter
self.on_key_arrow = events.on_key_arrow
self.on_exit = events.on_exit
self.on_exit += self.close
self.on_key = events.on_key
def paint(self):
echo(self.term.clear)
for widget in self._widgets:
widget.paint()
def close(self):
"""Close this window.
"""
self._listen = False
def listen(self):
"""Blocking call on widgets.
"""
while self._listen:
key = u''
key = self.term.inkey(timeout=0.2)
try:
if key.code == KEY_ENTER:
self.on_enter(key=key)
elif key.code in (KEY_DOWN, KEY_UP):
self.on_key_arrow(key=key)
elif key.code == KEY_ESCAPE or key == chr(3):
self.on_exit(key=key)
elif key != '':
self.on_key(key=key)
except KeyboardInterrupt:
self.on_exit(key=key)
def add(self, widget, *args, **kwargs):
"""Insert new element.
Usage:
window.add(widget, **{
'prop1': val,
'prop2': val2
})
"""
ins_widget = widget(*args, **kwargs)
self.__iadd__(ins_widget)
return ins_widget
def __iadd__(self, widgets):
"""Insert new element.
Usage:
window += widget(**{
'prop1': val,
'prop2': val2
})
"""
if not isinstance(widgets, (list, tuple)):
if not isinstance(widgets, Widget):
raise TypeError("Only Widgets and list of widgets")
widgets = [widgets]
for widget in widgets:
if not isinstance(widget, Widget):
raise TypeError("Only Widgets")
if widget.ref:
name = widget.ref
if name in self.store:
raise KeyError(name + ' key already exist')
self.store.update({
name: widget
})
widget.parent = self
widget.store = self.store
# FIXME: Solve if after add element, add a listenner fail
self.on_enter += widget.on_enter
self.on_key_arrow += widget.on_key_arrow
self.on_exit += widget.on_exit
self.on_key += widget.on_key
self._widgets.append(widget)
if self._is_active:
widget.paint()
return self
@staticmethod
def decorator(function=None, **d_wargs):
def _decorator(func):
def wrapper(*args, **kwargs):
min_x = d_wargs.get('min_x', 0)
min_y = d_wargs.get('min_y', 0)
if 'store' in d_wargs and 'store' in kwargs:
raise SyntaxError("store argument repeated")
elif 'store' in d_wargs:
store = d_wargs.get('store')
elif 'store' in kwargs:
store = kwargs.pop('store')
else:
store = Store()
if not store.get('windows'):
store.windows = [None]
tmp = None
with Window(parent=store.windows[-1], store=store) as win:
tmp = win
store.windows.append(tmp)
if win.height < min_y:
raise RuntimeError("Window height is insufficient")
elif win.width < min_x:
raise RuntimeError("Window width is insufficient")
func(win, *args, **kwargs)
store.windows.remove(tmp)
if d_wargs.get('main', False) is True:
wrapper()
return wrapper
if function:
return _decorator(function)
return _decorator
@property
def size(self):
""" Height and Width of window. """
return self._width, self._height
def __enter__(self):
echo(self.term.clear)
return self
def __exit__(self, _type, _value, _traceback):
if not self._widgets:
raise IndexError('Not widgets found')
self._is_active = True
if self._parent is None:
with self.term.cbreak(), self.term.hidden_cursor():
self.paint()
self.listen()
else:
self.paint()
self.listen()
if self._parent is not None:
self._parent.paint()
else:
echo(self.term.clear)
|
class Window(Widget):
'''Class that encapsulates a whole window and allows to own the elements
inside.
:param store: Global storage is necessary
:param parent: If you do not provider it is the main window
:type store: Store
:type parent: Window
:example:
>>> with Window (store) as window:
... Window += element
'''
def __init__(self, *args, **kwargs):
pass
def worker(func, time):
pass
def timeout(func, *args, **kwargs):
pass
def paint(self):
pass
def close(self):
'''Close this window.
'''
pass
def listen(self):
'''Blocking call on widgets.
'''
pass
def add(self, widget, *args, **kwargs):
'''Insert new element.
Usage:
window.add(widget, **{
'prop1': val,
'prop2': val2
})
'''
pass
def __iadd__(self, widgets):
'''Insert new element.
Usage:
window += widget(**{
'prop1': val,
'prop2': val2
})
'''
pass
@staticmethod
def decorator(function=None, **d_wargs):
pass
def _decorator(func):
pass
def wrapper(*args, **kwargs):
pass
@property
def size(self):
''' Height and Width of window. '''
pass
def __enter__(self):
pass
def __exit__(self, _type, _value, _traceback):
pass
| 17 | 6 | 16 | 1 | 13 | 1 | 3 | 0.23 | 1 | 12 | 1 | 0 | 9 | 10 | 10 | 26 | 188 | 29 | 129 | 39 | 112 | 30 | 115 | 36 | 100 | 8 | 2 | 3 | 41 |
3,635 |
Akrog/cinderlib
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Akrog_cinderlib/cinderlib/tests/unit/test_cinderlib.py
|
cinderlib.tests.unit.test_cinderlib.TestCinderlib
|
class TestCinderlib(base.BaseTest):
def test_lib_assignations(self):
self.assertEqual(cinderlib.setup, cinderlib.Backend.global_setup)
self.assertEqual(cinderlib.Backend, cinderlib.objects.Backend)
self.assertEqual(cinderlib.Backend,
cinderlib.objects.Object.backend_class)
@mock.patch('oslo_utils.importutils.import_object')
@mock.patch('cinderlib.Backend._set_backend_config')
@mock.patch('cinderlib.Backend.global_setup')
def test_init(self, mock_global_setup, mock_config, mock_import):
cfg.CONF.set_override('host', 'host')
driver_cfg = {'k': 'v', 'k2': 'v2', 'volume_backend_name': 'Test'}
cinderlib.Backend.global_initialization = False
driver = mock_import.return_value
driver.capabilities = {'pools': [{'pool_name': 'default'}]}
backend = objects.Backend(**driver_cfg)
mock_global_setup.assert_called_once_with()
self.assertIn('Test', objects.Backend.backends)
self.assertEqual(backend, objects.Backend.backends['Test'])
mock_config.assert_called_once_with(driver_cfg)
conf = mock_config.return_value
mock_import.assert_called_once_with(conf.volume_driver,
configuration=conf,
db=self.persistence.db,
host='host@Test',
cluster_name=None,
active_backend_id=None)
self.assertEqual(backend.driver, driver)
driver.do_setup.assert_called_once_with(objects.CONTEXT)
driver.check_for_setup_error.assert_called_once_with()
driver.init_capabilities.assert_called_once_with()
driver.set_throttle.assert_called_once_with()
driver.set_initialized.assert_called_once_with()
self.assertEqual(driver_cfg, backend._driver_cfg)
self.assertIsNone(backend._volumes)
driver.get_volume_stats.assert_not_called()
self.assertEqual(('default',), backend.pool_names)
@mock.patch('urllib3.disable_warnings')
@mock.patch('cinder.coordination.COORDINATOR')
@mock.patch('cinderlib.Backend._set_priv_helper')
@mock.patch('cinderlib.Backend._set_logging')
@mock.patch('cinderlib.cinderlib.serialization')
@mock.patch('cinderlib.Backend.set_persistence')
def test_global_setup(self, mock_set_pers, mock_serial, mock_log,
mock_sudo, mock_coord, mock_disable_warn):
cls = objects.Backend
cls.global_initialization = False
cinder_cfg = {'k': 'v', 'k2': 'v2'}
cls.global_setup('file_locks',
mock.sentinel.root_helper,
mock.sentinel.ssl_warnings,
mock.sentinel.disable_logs,
mock.sentinel.non_uuid_ids,
mock.sentinel.backend_info,
mock.sentinel.project_id,
mock.sentinel.user_id,
mock.sentinel.pers_cfg,
mock.sentinel.fail_missing_backend,
'mock.sentinel.host',
**cinder_cfg)
self.assertEqual('file_locks', cfg.CONF.oslo_concurrency.lock_path)
self.assertEqual('file://file_locks',
cfg.CONF.coordination.backend_url)
self.assertEqual(mock.sentinel.fail_missing_backend,
cls.fail_on_missing_backend)
self.assertEqual(mock.sentinel.root_helper, cls.root_helper)
self.assertEqual(mock.sentinel.project_id, cls.project_id)
self.assertEqual(mock.sentinel.user_id, cls.user_id)
self.assertEqual(mock.sentinel.non_uuid_ids, cls.non_uuid_ids)
self.assertEqual('mock.sentinel.host', cfg.CONF.host)
mock_set_pers.assert_called_once_with(mock.sentinel.pers_cfg)
self.assertEqual(cinderlib.__version__, cfg.CONF.version)
mock_serial.setup.assert_called_once_with(cls)
mock_log.assert_called_once_with(mock.sentinel.disable_logs)
mock_sudo.assert_called_once_with(mock.sentinel.root_helper)
mock_coord.start.assert_called_once_with()
self.assertEqual(2, mock_disable_warn.call_count)
self.assertTrue(cls.global_initialization)
self.assertEqual(mock.sentinel.backend_info,
cls.output_all_backend_info)
def test_pool_names(self):
pool_names = [mock.sentinel._pool_names]
self.backend._pool_names = pool_names
self.assertEqual(pool_names, self.backend.pool_names)
def test_volumes(self):
self.backend._volumes = None
res = self.backend.volumes
self.assertEqual(self.persistence.get_volumes.return_value, res)
self.assertEqual(self.persistence.get_volumes.return_value,
self.backend._volumes)
self.persistence.get_volumes.assert_called_once_with(
backend_name=self.backend.id)
def test_id(self):
self.assertEqual(self.backend._driver_cfg['volume_backend_name'],
self.backend.id)
def test_volumes_filtered(self):
res = self.backend.volumes_filtered(mock.sentinel.vol_id,
mock.sentinel.vol_name)
self.assertEqual(self.persistence.get_volumes.return_value, res)
self.assertEqual([], self.backend._volumes)
self.persistence.get_volumes.assert_called_once_with(
backend_name=self.backend.id,
volume_id=mock.sentinel.vol_id,
volume_name=mock.sentinel.vol_name)
def test_stats(self):
expect = {'pools': [mock.sentinel.data]}
with mock.patch.object(self.backend.driver, 'get_volume_stats',
return_value=expect) as mock_stat:
res = self.backend.stats(mock.sentinel.refresh)
self.assertEqual(expect, res)
mock_stat.assert_called_once_with(refresh=mock.sentinel.refresh)
def test_stats_single(self):
stat_value = {'driver_version': 'v1', 'key': 'value'}
expect = {'driver_version': 'v1', 'key': 'value',
'pools': [{'key': 'value', 'pool_name': 'fake_backend'}]}
with mock.patch.object(self.backend.driver, 'get_volume_stats',
return_value=stat_value) as mock_stat:
res = self.backend.stats(mock.sentinel.refresh)
self.assertEqual(expect, res)
mock_stat.assert_called_once_with(refresh=mock.sentinel.refresh)
@mock.patch('cinderlib.objects.Volume')
def test_create_volume(self, mock_vol):
kwargs = {'k': 'v', 'k2': 'v2'}
res = self.backend.create_volume(mock.sentinel.size,
mock.sentinel.name,
mock.sentinel.desc,
mock.sentinel.boot,
**kwargs)
self.assertEqual(mock_vol.return_value, res)
mock_vol.assert_called_once_with(self.backend, size=mock.sentinel.size,
name=mock.sentinel.name,
description=mock.sentinel.desc,
bootable=mock.sentinel.boot,
**kwargs)
mock_vol.return_value.create.assert_called_once_with()
def test__volume_removed_no_list(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
self.backend._volume_removed(vol)
def test__volume_removed(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
vol2 = cinderlib.objects.Volume(self.backend, id=vol.id, size=10)
self.backend._volumes.append(vol)
self.backend._volume_removed(vol2)
self.assertEqual([], self.backend.volumes)
def test__volume_created(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
self.backend._volume_created(vol)
self.assertEqual([vol], self.backend.volumes)
def test__volume_created_is_none(self):
vol = cinderlib.objects.Volume(self.backend, size=10)
self.backend._volume_created(vol)
self.assertEqual([vol], self.backend.volumes)
def test_validate_connector(self):
self.backend.validate_connector(mock.sentinel.connector)
self.backend.driver.validate_connector.assert_called_once_with(
mock.sentinel.connector)
@mock.patch('cinderlib.objects.setup')
@mock.patch('cinderlib.persistence.setup')
def test_set_persistence(self, mock_pers_setup, mock_obj_setup):
cinderlib.Backend.global_initialization = True
cinderlib.Backend.set_persistence(mock.sentinel.pers_cfg)
mock_pers_setup.assert_called_once_with(mock.sentinel.pers_cfg)
self.assertEqual(mock_pers_setup.return_value,
cinderlib.Backend.persistence)
mock_obj_setup.assert_called_once_with(mock_pers_setup.return_value,
cinderlib.Backend,
self.backend.project_id,
self.backend.user_id,
self.backend.non_uuid_ids)
self.assertEqual(mock_pers_setup.return_value.db,
self.backend.driver.db)
def test_config(self):
self.backend.output_all_backend_info = False
res = self.backend.config
self.assertEqual({'volume_backend_name': self.backend.id}, res)
def test_config_full(self):
self.backend.output_all_backend_info = True
with mock.patch.object(self.backend, '_driver_cfg') as mock_driver:
res = self.backend.config
self.assertEqual(mock_driver, res)
def test_refresh(self):
self.backend.refresh()
self.persistence.get_volumes.assert_called_once_with(
backend_name=self.backend.id)
def test_refresh_no_call(self):
self.backend._volumes = None
self.backend.refresh()
self.persistence.get_volumes.assert_not_called()
|
class TestCinderlib(base.BaseTest):
def test_lib_assignations(self):
pass
@mock.patch('oslo_utils.importutils.import_object')
@mock.patch('cinderlib.Backend._set_backend_config')
@mock.patch('cinderlib.Backend.global_setup')
def test_init(self, mock_global_setup, mock_config, mock_import):
pass
@mock.patch('urllib3.disable_warnings')
@mock.patch('cinder.coordination.COORDINATOR')
@mock.patch('cinderlib.Backend._set_priv_helper')
@mock.patch('cinderlib.Backend._set_logging')
@mock.patch('cinderlib.cinderlib.serialization')
@mock.patch('cinderlib.Backend.set_persistence')
def test_global_setup(self, mock_set_pers, mock_serial, mock_log,
mock_sudo, mock_coord, mock_disable_warn):
pass
def test_pool_names(self):
pass
def test_volumes(self):
pass
def test_id(self):
pass
def test_volumes_filtered(self):
pass
def test_stats(self):
pass
def test_stats_single(self):
pass
@mock.patch('cinderlib.objects.Volume')
def test_create_volume(self, mock_vol):
pass
def test__volume_removed_no_list(self):
pass
def test__volume_removed_no_list(self):
pass
def test__volume_created(self):
pass
def test__volume_created_is_none(self):
pass
def test_validate_connector(self):
pass
@mock.patch('cinderlib.objects.setup')
@mock.patch('cinderlib.persistence.setup')
def test_set_persistence(self, mock_pers_setup, mock_obj_setup):
pass
def test_config(self):
pass
def test_config_full(self):
pass
def test_refresh(self):
pass
def test_refresh_no_call(self):
pass
| 33 | 0 | 9 | 1 | 9 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 20 | 1 | 20 | 23 | 218 | 30 | 188 | 53 | 154 | 0 | 129 | 44 | 108 | 1 | 2 | 1 | 20 |
3,636 |
Akrog/cinderlib
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Akrog_cinderlib/cinderlib/tests/unit/nos_brick.py
|
cinderlib.tests.unit.nos_brick.TestRBDConnector
|
class TestRBDConnector(base.BaseTest):
def setUp(self):
self.connector = nos_brick.RBDConnector('sudo')
self.connector.im_root = False
self.containerized = False
self.connector._setup_rbd_class = lambda *args: None
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs')
def test__ensure_dir(self, mkdir_mock, exec_mock):
self.connector._ensure_dir(mock.sentinel.path)
exec_mock.assert_called_once_with('mkdir', '-p', '-m0755',
mock.sentinel.path, run_as_root=True)
mkdir_mock.assert_not_called()
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs')
def test__ensure_dir_root(self, mkdir_mock, exec_mock):
self.connector.im_root = True
self.connector._ensure_dir(mock.sentinel.path)
mkdir_mock.assert_called_once_with(mock.sentinel.path, 0o755)
exec_mock.assert_not_called()
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs', side_effect=OSError(errno.EEXIST, ''))
def test__ensure_dir_root_exists(self, mkdir_mock, exec_mock):
self.connector.im_root = True
self.connector._ensure_dir(mock.sentinel.path)
mkdir_mock.assert_called_once_with(mock.sentinel.path, 0o755)
exec_mock.assert_not_called()
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs', side_effect=OSError(errno.EPERM, ''))
def test__ensure_dir_root_fails(self, mkdir_mock, exec_mock):
self.connector.im_root = True
with self.assertRaises(OSError) as exc:
self.connector._ensure_dir(mock.sentinel.path)
self.assertEqual(mkdir_mock.side_effect, exc.exception)
mkdir_mock.assert_called_once_with(mock.sentinel.path, 0o755)
exec_mock.assert_not_called()
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink')
def test__ensure_link(self, link_mock, dir_mock, exec_mock, path_mock):
source = '/dev/rbd0'
link = '/dev/rbd/rbd/volume-xyz'
self.connector._ensure_link(source, link)
dir_mock.assert_called_once_with('/dev/rbd/rbd')
exec_mock.assert_called_once_with('ln', '-s', '-f', source, link,
run_as_root=True)
link_mock.assert_not_called()
path_mock.assert_not_called()
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink')
def test__ensure_link_root(self, link_mock, dir_mock, exec_mock,
path_mock):
self.connector.im_root = True
source = '/dev/rbd0'
link = '/dev/rbd/rbd/volume-xyz'
self.connector._ensure_link(source, link)
dir_mock.assert_called_once_with('/dev/rbd/rbd')
exec_mock.assert_not_called()
link_mock.assert_called_once_with(source, link)
path_mock.assert_not_called()
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink', side_effect=OSError(errno.EEXIST, ''))
def test__ensure_link_root_exists(self, link_mock, dir_mock, exec_mock,
path_mock):
self.connector.im_root = True
source = '/dev/rbd0'
path_mock.return_value = source
link = '/dev/rbd/rbd/volume-xyz'
self.connector._ensure_link(source, link)
dir_mock.assert_called_once_with('/dev/rbd/rbd')
exec_mock.assert_not_called()
link_mock.assert_called_once_with(source, link)
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink', side_effect=OSError(errno.EPERM, ''))
def test__ensure_link_root_fails(self, link_mock, dir_mock, exec_mock,
path_mock):
self.connector.im_root = True
source = '/dev/rbd0'
path_mock.return_value = source
link = '/dev/rbd/rbd/volume-xyz'
with self.assertRaises(OSError) as exc:
self.connector._ensure_link(source, link)
self.assertEqual(link_mock.side_effect, exc.exception)
dir_mock.assert_called_once_with('/dev/rbd/rbd')
exec_mock.assert_not_called()
link_mock.assert_called_once_with(source, link)
@mock.patch('os.remove')
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink', side_effect=[OSError(errno.EEXIST, ''), None])
def test__ensure_link_root_replace(self, link_mock, dir_mock, exec_mock,
path_mock, remove_mock):
self.connector.im_root = True
source = '/dev/rbd0'
path_mock.return_value = '/dev/rbd1'
link = '/dev/rbd/rbd/volume-xyz'
self.connector._ensure_link(source, link)
dir_mock.assert_called_once_with('/dev/rbd/rbd')
exec_mock.assert_not_called()
remove_mock.assert_called_once_with(link)
self.assertListEqual(
[mock.call(source, link), mock.call(source, link)],
link_mock.mock_calls)
|
class TestRBDConnector(base.BaseTest):
def setUp(self):
pass
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs')
def test__ensure_dir(self, mkdir_mock, exec_mock):
pass
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs')
def test__ensure_dir_root(self, mkdir_mock, exec_mock):
pass
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs', side_effect=OSError(errno.EEXIST, ''))
def test__ensure_dir_root_exists(self, mkdir_mock, exec_mock):
pass
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch('os.makedirs', side_effect=OSError(errno.EPERM, ''))
def test__ensure_dir_root_fails(self, mkdir_mock, exec_mock):
pass
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink')
def test__ensure_link(self, link_mock, dir_mock, exec_mock, path_mock):
pass
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink')
def test__ensure_link_root(self, link_mock, dir_mock, exec_mock,
path_mock):
pass
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink', side_effect=OSError(errno.EEXIST, ''))
def test__ensure_link_root_exists(self, link_mock, dir_mock, exec_mock,
path_mock):
pass
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink', side_effect=OSError(errno.EPERM, ''))
def test__ensure_link_root_fails(self, link_mock, dir_mock, exec_mock,
path_mock):
pass
@mock.patch('os.remove')
@mock.patch('os.path.realpath')
@mock.patch.object(nos_brick.RBDConnector, '_execute')
@mock.patch.object(nos_brick.RBDConnector, '_ensure_dir')
@mock.patch('os.symlink', side_effect=[OSError(errno.EEXIST, ''), None])
def test__ensure_link_root_replace(self, link_mock, dir_mock, exec_mock,
path_mock, remove_mock):
pass
| 40 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 10 | 2 | 10 | 13 | 122 | 11 | 111 | 38 | 67 | 0 | 74 | 23 | 63 | 1 | 2 | 1 | 10 |
3,637 |
Akrog/cinderlib
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Akrog_cinderlib/cinderlib/tests/unit/objects/test_connection.py
|
cinderlib.tests.unit.objects.test_connection.TestConnection
|
class TestConnection(base.BaseTest):
def setUp(self):
self.original_is_multipathed = objects.Connection._is_multipathed_conn
self.mock_is_mp = self.patch(
'cinderlib.objects.Connection._is_multipathed_conn')
self.mock_default = self.patch(
'os_brick.initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT')
super(TestConnection, self).setUp()
self.vol = objects.Volume(self.backend_name, size=10)
self.kwargs = {'k1': 'v1', 'k2': 'v2'}
self.conn = objects.Connection(self.backend, volume=self.vol,
**self.kwargs)
self.conn._ovo.connection_info = {
'connector': {'multipath': mock.sentinel.mp_ovo_connector}}
def test_init(self):
expected = self.kwargs.copy()
expected['attach_mode'] = 'rw'
self.mock_is_mp.assert_called_once_with(expected)
self.assertEqual(self.conn.use_multipath, self.mock_is_mp.return_value)
self.assertEqual(self.conn.scan_attempts, self.mock_default)
self.assertEqual(self.conn.attach_mode, 'rw')
self.assertIsNone(self.conn._connector)
self.assertEqual(self.vol, self.conn._volume)
self.assertEqual(self.vol._ovo, self.conn._ovo.volume)
self.assertEqual(self.vol._ovo.id, self.conn._ovo.volume_id)
def test__is_multipathed_conn_kwargs(self):
res = self.original_is_multipathed(dict(
use_multipath=mock.sentinel.mp_kwargs,
connector={'multipath': mock.sentinel.mp_connector},
__ovo=self.conn._ovo))
self.assertEqual(mock.sentinel.mp_kwargs, res)
def test__is_multipathed_conn_connector_kwarg(self):
res = self.original_is_multipathed(dict(
connector={'multipath': mock.sentinel.mp_connector},
__ovo=self.conn._ovo))
self.assertEqual(mock.sentinel.mp_connector, res)
def test__is_multipathed_conn_connector_ovo(self):
res = self.original_is_multipathed(dict(connector={},
__ovo=self.conn._ovo))
self.assertEqual(mock.sentinel.mp_ovo_connector, res)
def test__is_multipathed_conn_connection_info_iscsi_true(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_iqns': '',
'target_portals': ''}}}))
self.assertTrue(res)
def test__is_multipathed_conn_connection_info_iscsi_false(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_iqns': ''}}}))
self.assertFalse(res)
def test__is_multipathed_conn_connection_info_fc_true(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_wwn': []}}}))
self.assertTrue(res)
def test__is_multipathed_conn_connection_info_fc_false(self):
res = self.original_is_multipathed(dict(
connection_info={'conn': {'data': {'target_wwn': ''}}}))
self.assertFalse(res)
def test_init_no_backend(self):
self.assertRaises(TypeError, objects.Connection)
def test_init_no_volume(self):
self.mock_is_mp.reset_mock()
kwargs = self.kwargs.copy()
kwargs['attach_mode'] = 'ro'
conn = objects.Connection(self.backend, **kwargs)
self.mock_is_mp.assert_called_once_with(kwargs)
self.assertEqual(conn.use_multipath, self.mock_is_mp.return_value)
self.assertEqual(conn.scan_attempts, self.mock_default)
self.assertEqual(conn.attach_mode, 'ro')
self.assertIsNone(conn._connector)
def test_connect(self):
connector = {'my_c': 'v'}
conn = self.conn.connect(self.vol, connector)
init_conn = self.backend.driver.initialize_connection
init_conn.assert_called_once_with(self.vol, connector)
self.assertIsInstance(conn, objects.Connection)
self.assertEqual('attached', conn.status)
self.assertEqual(init_conn.return_value, conn.connection_info['conn'])
self.assertEqual(connector, conn.connector_info)
self.persistence.set_connection.assert_called_once_with(conn)
@mock.patch('cinderlib.objects.Volume._disconnect')
@mock.patch('cinderlib.objects.Connection._disconnect')
def test_disconnect(self, mock_disc, mock_vol_disc):
self.conn.disconnect(force=mock.sentinel.force)
mock_disc.assert_called_once_with(mock.sentinel.force)
mock_vol_disc.assert_called_once_with(self.conn)
def test__disconnect(self):
conn_info = self.conn.connector_info
self.conn._disconnect(mock.sentinel.force)
self.backend.driver.terminate_connection.assert_called_once_with(
self.vol._ovo, conn_info, force=mock.sentinel.force)
self.assertEqual({}, self.conn.conn_info)
self.assertEqual('detached', self.conn.status)
self.persistence.delete_connection.assert_called_once_with(self.conn)
@mock.patch('cinderlib.objects.Connection.conn_info', {'data': 'mydata'})
@mock.patch('cinderlib.objects.Connection.path')
@mock.patch('cinderlib.objects.Connection.device_attached')
def test_attach(self, mock_attached, mock_path):
with mock.patch('cinderlib.objects.Connection.connector') as mock_conn:
self.conn.attach()
mock_conn.connect_volume.assert_called_once_with('mydata')
mock_attached.assert_called_once_with(
mock_conn.connect_volume.return_value)
mock_conn.check_valid_device.assert_called_once_with(mock_path)
self.assertEqual(self.conn, self.vol.local_attach)
@mock.patch('cinderlib.objects.Connection.conn_info', {'data': 'mydata'})
@mock.patch('cinderlib.objects.Connection.device')
def test_detach(self, mock_device):
self.vol.local_attach = mock.Mock()
with mock.patch('cinderlib.objects.Connection.connector') as mock_conn:
self.conn.detach(mock.sentinel.force, mock.sentinel.ignore)
mock_conn.disconnect_volume.assert_called_once_with(
'mydata',
mock_device,
force=mock.sentinel.force,
ignore_errors=mock.sentinel.ignore)
self.assertIsNone(self.vol.local_attach)
self.assertIsNone(self.conn.device)
self.assertIsNone(self.conn._connector)
self.persistence.set_connection.assert_called_once_with(self.conn)
def test_get_by_id(self):
self.persistence.get_connections.return_value = [mock.sentinel.conn]
res = objects.Connection.get_by_id(mock.sentinel.conn_id)
self.assertEqual(mock.sentinel.conn, res)
self.persistence.get_connections.assert_called_once_with(
connection_id=mock.sentinel.conn_id)
def test_get_by_id_not_found(self):
self.persistence.get_connections.return_value = None
self.assertRaises(exception.ConnectionNotFound,
objects.Connection.get_by_id,
mock.sentinel.conn_id)
self.persistence.get_connections.assert_called_once_with(
connection_id=mock.sentinel.conn_id)
def test_device_attached(self):
self.conn.device_attached(mock.sentinel.device)
self.assertEqual(mock.sentinel.device,
self.conn.connection_info['device'])
self.persistence.set_connection.assert_called_once_with(self.conn)
def test_conn_info_setter(self):
self.conn.conn_info = mock.sentinel.conn_info
self.assertEqual(mock.sentinel.conn_info,
self.conn._ovo.connection_info['conn'])
def test_conn_info_setter_clear(self):
self.conn.conn_info = mock.sentinel.conn_info
self.conn.conn_info = {}
self.assertIsNone(self.conn._ovo.connection_info)
def test_conn_info_getter_default_attach_mode(self):
self.conn.conn_info = {'data': {}}
self.assertEqual({'data': {'access_mode': 'rw'}}, self.conn.conn_info)
def test_conn_info_getter_ro(self):
self.conn._ovo.attach_mode = 'ro'
self.conn.conn_info = {'data': {'target_lun': 0}}
self.assertEqual({'data': {'target_lun': 0, 'access_mode': 'ro'}},
self.conn.conn_info)
def test_conn_info_getter_none(self):
self.conn.conn_info = None
self.assertEqual({}, self.conn.conn_info)
def test_protocol(self):
self.conn.conn_info = {'driver_volume_type': mock.sentinel.iscsi}
self.assertEqual(mock.sentinel.iscsi, self.conn.protocol)
def test_connector_info_setter(self):
self.conn.connector_info = mock.sentinel.connector
self.assertEqual(mock.sentinel.connector,
self.conn._ovo.connection_info['connector'])
self.assertIn('connection_info', self.conn._ovo._changed_fields)
def test_connector_info_getter(self):
self.conn.connector_info = mock.sentinel.connector
self.assertEqual(mock.sentinel.connector, self.conn.connector_info)
def test_connector_info_getter_empty(self):
self.conn._ovo.connection_info = None
self.assertIsNone(self.conn.connector_info)
def test_device_setter(self):
self.conn.device = mock.sentinel.device
self.assertEqual(mock.sentinel.device,
self.conn._ovo.connection_info['device'])
self.assertIn('connection_info', self.conn._ovo._changed_fields)
def test_device_setter_none(self):
self.conn.device = mock.sentinel.device
self.conn.device = None
self.assertNotIn('device', self.conn._ovo.connection_info)
self.assertIn('connection_info', self.conn._ovo._changed_fields)
def test_device_getter(self):
self.conn.device = mock.sentinel.device
self.assertEqual(mock.sentinel.device, self.conn.device)
def test_path(self):
self.conn.device = {'path': mock.sentinel.path}
self.assertEqual(mock.sentinel.path, self.conn.path)
@mock.patch('cinderlib.objects.Connection.conn_info')
@mock.patch('cinderlib.objects.Connection.protocol')
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
def test_connector_getter(self, mock_connector, mock_proto, mock_info):
res = self.conn.connector
self.assertEqual(mock_connector.return_value, res)
mock_connector.assert_called_once_with(
mock_proto,
self.backend.root_helper,
use_multipath=self.mock_is_mp.return_value,
device_scan_attempts=self.mock_default,
conn=mock_info,
do_local_attach=True)
# Make sure we cache the value
res = self.conn.connector
self.assertEqual(1, mock_connector.call_count)
@ddt.data(True, False)
def test_attached_true(self, value):
with mock.patch('cinderlib.objects.Connection.device', value):
self.assertEqual(value, self.conn.attached)
@ddt.data(True, False)
def test_connected(self, value):
with mock.patch('cinderlib.objects.Connection.conn_info', value):
self.assertEqual(value, self.conn.connected)
|
class TestConnection(base.BaseTest):
def setUp(self):
pass
def test_init(self):
pass
def test__is_multipathed_conn_kwargs(self):
pass
def test__is_multipathed_conn_connector_kwarg(self):
pass
def test__is_multipathed_conn_connector_ovo(self):
pass
def test__is_multipathed_conn_connection_info_iscsi_true(self):
pass
def test__is_multipathed_conn_connection_info_iscsi_false(self):
pass
def test__is_multipathed_conn_connection_info_fc_true(self):
pass
def test__is_multipathed_conn_connection_info_fc_false(self):
pass
def test_init_no_backend(self):
pass
def test_init_no_volume(self):
pass
def test_connect(self):
pass
@mock.patch('cinderlib.objects.Volume._disconnect')
@mock.patch('cinderlib.objects.Connection._disconnect')
def test_disconnect(self, mock_disc, mock_vol_disc):
pass
def test__disconnect(self):
pass
@mock.patch('cinderlib.objects.Connection.conn_info', {'data': 'mydata'})
@mock.patch('cinderlib.objects.Connection.path')
@mock.patch('cinderlib.objects.Connection.device_attached')
def test_attach(self, mock_attached, mock_path):
pass
@mock.patch('cinderlib.objects.Connection.conn_info', {'data': 'mydata'})
@mock.patch('cinderlib.objects.Connection.device')
def test_detach(self, mock_device):
pass
def test_get_by_id(self):
pass
def test_get_by_id_not_found(self):
pass
def test_device_attached(self):
pass
def test_conn_info_setter(self):
pass
def test_conn_info_setter_clear(self):
pass
def test_conn_info_getter_default_attach_mode(self):
pass
def test_conn_info_getter_ro(self):
pass
def test_conn_info_getter_none(self):
pass
def test_protocol(self):
pass
def test_connector_info_setter(self):
pass
def test_connector_info_getter(self):
pass
def test_connector_info_getter_empty(self):
pass
def test_device_setter(self):
pass
def test_device_setter_none(self):
pass
def test_device_getter(self):
pass
def test_path(self):
pass
@mock.patch('cinderlib.objects.Connection.conn_info')
@mock.patch('cinderlib.objects.Connection.protocol')
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
def test_connector_getter(self, mock_connector, mock_proto, mock_info):
pass
@ddt.data(True, False)
def test_attached_true(self, value):
pass
@ddt.data(True, False)
def test_connected(self, value):
pass
| 48 | 0 | 6 | 0 | 6 | 0 | 1 | 0 | 1 | 3 | 0 | 0 | 35 | 8 | 35 | 38 | 247 | 37 | 209 | 66 | 161 | 1 | 161 | 58 | 125 | 1 | 2 | 1 | 35 |
3,638 |
Akrog/cinderlib
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Akrog_cinderlib/cinderlib/tests/unit/objects/test_snapshot.py
|
cinderlib.tests.unit.objects.test_snapshot.TestSnapshot
|
class TestSnapshot(base.BaseTest):
def setUp(self):
super(TestSnapshot, self).setUp()
self.vol = objects.Volume(self.backend_name, size=10,
extra_specs={'e': 'v'},
qos_specs={'q': 'qv'})
self.snap = objects.Snapshot(self.vol,
name='my_snap', description='my_desc')
self.vol._snapshots.append(self.snap)
self.vol._ovo.snapshots.objects.append(self.snap._ovo)
def test_init_from_volume(self):
self.assertIsNotNone(self.snap.id)
self.assertEqual(self.backend, self.snap.backend)
self.assertEqual('my_snap', self.snap.name)
self.assertEqual('my_snap', self.snap.display_name)
self.assertEqual('my_desc', self.snap.description)
self.assertEqual(self.vol.user_id, self.snap.user_id)
self.assertEqual(self.vol.project_id, self.snap.project_id)
self.assertEqual(self.vol.id, self.snap.volume_id)
self.assertEqual(self.vol.size, self.snap.volume_size)
self.assertEqual(self.vol._ovo, self.snap._ovo.volume)
self.assertEqual(self.vol.volume_type_id, self.snap.volume_type_id)
self.assertEqual(self.vol, self.snap.volume)
def test_init_from_ovo(self):
snap2 = objects.Snapshot(None, __ovo=self.snap._ovo)
self.assertEqual(self.snap.backend, snap2.backend)
self.assertEqual(self.snap._ovo, snap2._ovo)
self.assertEqual(self.vol, self.snap.volume)
def test_create(self):
update_vol = {'provider_id': 'provider_id'}
self.backend.driver.create_snapshot.return_value = update_vol
self.snap.create()
self.assertEqual('available', self.snap.status)
self.assertEqual('provider_id', self.snap.provider_id)
self.backend.driver.create_snapshot.assert_called_once_with(
self.snap._ovo)
self.persistence.set_snapshot.assert_called_once_with(self.snap)
def test_create_error(self):
self.backend.driver.create_snapshot.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.snap.create()
self.assertEqual(self.snap, assert_context.exception.resource)
self.backend.driver.create_snapshot.assert_called_once_with(
self.snap._ovo)
self.assertEqual('error', self.snap.status)
self.persistence.set_snapshot.assert_called_once_with(self.snap)
def test_delete(self):
with mock.patch.object(
self.vol, '_snapshot_removed',
wraps=self.vol._snapshot_removed) as snap_removed_mock:
self.snap.delete()
snap_removed_mock.assert_called_once_with(self.snap)
self.backend.driver.delete_snapshot.assert_called_once_with(
self.snap._ovo)
self.persistence.delete_snapshot.assert_called_once_with(self.snap)
self.assertEqual([], self.vol.snapshots)
self.assertEqual([], self.vol._ovo.snapshots.objects)
self.assertEqual('deleted', self.snap._ovo.status)
@mock.patch('cinderlib.objects.Volume._snapshot_removed')
def test_delete_error(self, snap_removed_mock):
self.backend.driver.delete_snapshot.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.snap.delete()
self.assertEqual(self.snap, assert_context.exception.resource)
self.backend.driver.delete_snapshot.assert_called_once_with(
self.snap._ovo)
snap_removed_mock.assert_not_called()
self.persistence.delete_snapshot.assert_not_called()
self.assertEqual([self.snap], self.vol.snapshots)
self.assertEqual([self.snap._ovo], self.vol._ovo.snapshots.objects)
self.assertEqual('error_deleting', self.snap._ovo.status)
def test_create_volume(self):
create_mock = self.backend.driver.create_volume_from_snapshot
create_mock.return_value = None
vol2 = self.snap.create_volume(name='new_name', description='new_desc')
create_mock.assert_called_once_with(vol2._ovo, self.snap._ovo)
self.assertEqual('available', vol2.status)
self.assertEqual(1, len(self.backend._volumes))
self.assertEqual(vol2, self.backend._volumes[0])
self.persistence.set_volume.assert_called_once_with(vol2)
self.assertEqual(self.vol.id, self.vol.volume_type_id)
self.assertNotEqual(self.vol.id, vol2.id)
self.assertEqual(vol2.id, vol2.volume_type_id)
self.assertEqual(self.vol.volume_type.extra_specs,
vol2.volume_type.extra_specs)
self.assertEqual(self.vol.volume_type.qos_specs.specs,
vol2.volume_type.qos_specs.specs)
def test_create_volume_error(self):
create_mock = self.backend.driver.create_volume_from_snapshot
create_mock.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.snap.create_volume()
self.assertEqual(1, len(self.backend._volumes_inflight))
vol2 = list(self.backend._volumes_inflight.values())[0]
self.assertEqual(vol2, assert_context.exception.resource)
create_mock.assert_called_once_with(vol2, self.snap._ovo)
self.assertEqual('error', vol2.status)
self.persistence.set_volume.assert_called_once_with(mock.ANY)
def test_get_by_id(self):
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.return_value = [mock.sentinel.snap]
res = objects.Snapshot.get_by_id(mock.sentinel.snap_id)
mock_get_snaps.assert_called_once_with(
snapshot_id=mock.sentinel.snap_id)
self.assertEqual(mock.sentinel.snap, res)
def test_get_by_id_not_found(self):
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.return_value = None
self.assertRaises(exception.SnapshotNotFound,
objects.Snapshot.get_by_id, mock.sentinel.snap_id)
mock_get_snaps.assert_called_once_with(
snapshot_id=mock.sentinel.snap_id)
def test_get_by_name(self):
res = objects.Snapshot.get_by_name(mock.sentinel.name)
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.assert_called_once_with(
snapshot_name=mock.sentinel.name)
self.assertEqual(mock_get_snaps.return_value, res)
|
class TestSnapshot(base.BaseTest):
def setUp(self):
pass
def test_init_from_volume(self):
pass
def test_init_from_ovo(self):
pass
def test_create(self):
pass
def test_create_error(self):
pass
def test_delete(self):
pass
@mock.patch('cinderlib.objects.Volume._snapshot_removed')
def test_delete_error(self, snap_removed_mock):
pass
def test_create_volume(self):
pass
def test_create_volume_error(self):
pass
def test_get_by_id(self):
pass
def test_get_by_id_not_found(self):
pass
def test_get_by_name(self):
pass
| 14 | 0 | 10 | 0 | 10 | 0 | 1 | 0 | 1 | 2 | 0 | 0 | 12 | 3 | 12 | 15 | 130 | 12 | 118 | 31 | 104 | 0 | 102 | 26 | 89 | 1 | 2 | 1 | 12 |
3,639 |
Akrog/cinderlib
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/Akrog_cinderlib/cinderlib/tests/unit/objects/test_volume.py
|
cinderlib.tests.unit.objects.test_volume.TestVolume
|
class TestVolume(base.BaseTest):
def test_init_from_args_backend_name(self):
vol = objects.Volume(self.backend_name,
name='vol_name', description='vol_desc', size=10)
self.assertEqual(self.backend, vol.backend)
self.assertEqual('vol_name', vol.name)
self.assertEqual('vol_name', vol.display_name)
self.assertEqual('vol_desc', vol.description)
self.assertEqual(10, vol.size)
self.assertIsNotNone(vol.id)
def test_init_from_args_backend(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
self.assertEqual(self.backend, vol.backend)
self.assertEqual('vol_name', vol.name)
self.assertEqual('vol_name', vol.display_name)
self.assertEqual('vol_desc', vol.description)
self.assertEqual(10, vol.size)
self.assertIsNotNone(vol.id)
def test_init_from_volume(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
vol2 = objects.Volume(vol, name='new_name', size=11)
self.assertEqual(self.backend, vol2.backend)
self.assertEqual('new_name', vol2.name)
self.assertEqual('new_name', vol2.display_name)
self.assertEqual(vol.description, vol2.description)
self.assertEqual(11, vol2.size)
self.assertIsNotNone(vol2.id)
self.assertNotEqual(vol.id, vol2.id)
def test_init_from_ovo(self):
vol = objects.Volume(self.backend, size=10)
vol2 = objects.Volume(self.backend, __ovo=vol._ovo)
self.assertEqual(vol._ovo, vol2._ovo)
def test_snapshots_lazy_loading(self):
vol = objects.Volume(self.backend, size=10)
vol._snapshots = None
snaps = [objects.Snapshot(vol, name='my_snap')]
# Persistence retrieves Snapshots without the Volume, just volume_id
snaps[0]._ovo.volume = None
mock_get_snaps = self.persistence.get_snapshots
mock_get_snaps.return_value = snaps
result = vol.snapshots
mock_get_snaps.called_once_with(vol.id)
self.assertEqual(snaps, result)
self.assertEqual(snaps, vol._snapshots)
self.assertEqual(1, len(vol._ovo.snapshots))
self.assertEqual(vol._ovo.snapshots[0], result[0]._ovo)
# There is no second call when we reference it again
mock_get_snaps.reset_mock()
result = vol.snapshots
self.assertEqual(snaps, result)
mock_get_snaps.not_called()
def test_connections_lazy_loading(self):
vol = objects.Volume(self.backend, size=10)
vol._connections = None
conns = [objects.Connection(self.backend, connector={'k': 'v'},
volume_id=vol.id, status='attached',
attach_mode='rw',
connection_info={'conn': {}},
name='my_snap')]
mock_get_conns = self.persistence.get_connections
mock_get_conns.return_value = conns
result = vol.connections
mock_get_conns.called_once_with(volume_id=vol.id)
self.assertEqual(conns, result)
self.assertEqual(conns, vol._connections)
self.assertEqual(1, len(vol._ovo.volume_attachment))
self.assertEqual(vol._ovo.volume_attachment[0], result[0]._ovo)
# There is no second call when we reference it again
mock_get_conns.reset_mock()
result = vol.connections
self.assertEqual(conns, result)
mock_get_conns.not_called()
def test_get_by_id(self):
mock_get_vols = self.persistence.get_volumes
mock_get_vols.return_value = [mock.sentinel.vol]
res = objects.Volume.get_by_id(mock.sentinel.vol_id)
mock_get_vols.assert_called_once_with(volume_id=mock.sentinel.vol_id)
self.assertEqual(mock.sentinel.vol, res)
def test_get_by_id_not_found(self):
mock_get_vols = self.persistence.get_volumes
mock_get_vols.return_value = None
self.assertRaises(exception.VolumeNotFound,
objects.Volume.get_by_id, mock.sentinel.vol_id)
mock_get_vols.assert_called_once_with(volume_id=mock.sentinel.vol_id)
def test_get_by_name(self):
res = objects.Volume.get_by_name(mock.sentinel.name)
mock_get_vols = self.persistence.get_volumes
mock_get_vols.assert_called_once_with(volume_name=mock.sentinel.name)
self.assertEqual(mock_get_vols.return_value, res)
def test_create(self):
self.backend.driver.create_volume.return_value = None
vol = self.backend.create_volume(10, name='vol_name',
description='des')
self.backend.driver.create_volume.assert_called_once_with(vol._ovo)
self.assertEqual('available', vol.status)
self.persistence.set_volume.assert_called_once_with(vol)
def test_create_error(self):
self.backend.driver.create_volume.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
self.backend.create_volume(10, name='vol_name', description='des')
vol = assert_context.exception.resource
self.assertIsInstance(vol, objects.Volume)
self.assertEqual(10, vol.size)
self.assertEqual('vol_name', vol.name)
self.assertEqual('des', vol.description)
def test_delete(self):
vol = objects.Volume(self.backend_name, size=10)
vol.delete()
self.backend.driver.delete_volume.assert_called_once_with(vol._ovo)
self.persistence.delete_volume.assert_called_once_with(vol)
self.assertEqual('deleted', vol._ovo.status)
def test_delete_error_with_snaps(self):
vol = objects.Volume(self.backend_name, size=10, status='available')
snap = objects.Snapshot(vol)
vol._snapshots.append(snap)
self.assertRaises(exception.InvalidVolume, vol.delete)
self.assertEqual('available', vol._ovo.status)
def test_delete_error(self):
vol = objects.Volume(self.backend_name,
name='vol_name', description='vol_desc', size=10)
self.backend.driver.delete_volume.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
vol.delete()
self.assertEqual(vol, assert_context.exception.resource)
self.backend.driver.delete_volume.assert_called_once_with(vol._ovo)
self.assertEqual('error_deleting', vol._ovo.status)
def test_extend(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
vol.extend(11)
self.backend.driver.extend_volume.assert_called_once_with(vol._ovo, 11)
self.persistence.set_volume.assert_called_once_with(vol)
self.assertEqual('available', vol.status)
self.assertEqual(11, vol.size)
def test_extend_error(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
self.backend.driver.extend_volume.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
vol.extend(11)
self.assertEqual(vol, assert_context.exception.resource)
self.backend.driver.extend_volume.assert_called_once_with(vol._ovo, 11)
self.persistence.set_volume.assert_called_once_with(vol)
self.assertEqual('error', vol.status)
self.assertEqual(10, vol.size)
def test_clone(self):
vol = objects.Volume(self.backend_name, status='available', size=10,
extra_specs={'e': 'v'}, qos_specs={'q': 'qv'})
mock_clone = self.backend.driver.create_cloned_volume
mock_clone.return_value = None
res = vol.clone(size=11)
mock_clone.assert_called_once_with(res._ovo, vol._ovo)
self.persistence.set_volume.assert_called_once_with(res)
self.assertEqual('available', res.status)
self.assertEqual(11, res.size)
self.assertEqual(vol.id, vol.volume_type_id)
self.assertNotEqual(vol.id, res.id)
self.assertEqual(res.id, res.volume_type_id)
self.assertEqual(vol.volume_type.extra_specs,
res.volume_type.extra_specs)
self.assertEqual(vol.volume_type.qos_specs.specs,
res.volume_type.qos_specs.specs)
def test_clone_error(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_clone = self.backend.driver.create_cloned_volume
mock_clone.side_effect = exception.NotFound
with self.assertRaises(exception.NotFound) as assert_context:
vol.clone(size=11)
# Cloning volume is still in flight
self.assertEqual(1, len(self.backend._volumes_inflight))
new_vol = list(self.backend._volumes_inflight.values())[0]
self.assertEqual(new_vol, assert_context.exception.resource)
mock_clone.assert_called_once_with(new_vol, vol._ovo)
self.persistence.set_volume.assert_called_once_with(new_vol)
self.assertEqual('error', new_vol.status)
self.assertEqual(11, new_vol.size)
def test_create_snapshot(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_create = self.backend.driver.create_snapshot
mock_create.return_value = None
snap = vol.create_snapshot()
self.assertEqual([snap], vol.snapshots)
self.assertEqual([snap._ovo], vol._ovo.snapshots.objects)
mock_create.assert_called_once_with(snap._ovo)
self.assertEqual('available', snap.status)
self.assertEqual(10, snap.volume_size)
self.persistence.set_snapshot.assert_called_once_with(snap)
def test_create_snapshot_error(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_create = self.backend.driver.create_snapshot
mock_create.side_effect = exception.NotFound
self.assertRaises(exception.NotFound, vol.create_snapshot)
self.assertEqual(1, len(vol.snapshots))
snap = vol.snapshots[0]
self.persistence.set_snapshot.assert_called_once_with(snap)
self.assertEqual('error', snap.status)
mock_create.assert_called_once_with(snap._ovo)
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach(self, mock_connect, mock_conn_props):
vol = objects.Volume(self.backend_name, status='available', size=10)
res = vol.attach()
mock_conn_props.assert_called_once_with(
self.backend.root_helper,
mock.ANY,
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
mock_connect.assert_called_once_with(mock_conn_props.return_value)
mock_connect.return_value.attach.assert_called_once_with()
self.assertEqual(mock_connect.return_value, res)
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach_error_connect(self, mock_connect, mock_conn_props):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_connect.side_effect = exception.NotFound
self.assertRaises(exception.NotFound, vol.attach)
mock_conn_props.assert_called_once_with(
self.backend.root_helper,
mock.ANY,
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
mock_connect.assert_called_once_with(mock_conn_props.return_value)
mock_connect.return_value.attach.assert_not_called()
@mock.patch('cinderlib.objects.Volume.disconnect')
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach_error_attach(self, mock_connect, mock_conn_props,
mock_disconnect):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_attach = mock_connect.return_value.attach
mock_attach.side_effect = exception.NotFound
self.assertRaises(exception.NotFound, vol.attach)
mock_conn_props.assert_called_once_with(
self.backend.root_helper,
mock.ANY,
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
mock_connect.assert_called_once_with(mock_conn_props.return_value)
mock_disconnect.assert_called_once_with(mock_connect.return_value)
def test_detach_not_local(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
self.assertRaises(exception.NotLocal, vol.detach)
def test_detach(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
vol.local_attach = mock_conn
vol.detach(mock.sentinel.force, mock.sentinel.ignore_errors)
mock_conn.detach.assert_called_once_with(mock.sentinel.force,
mock.sentinel.ignore_errors,
mock.ANY)
mock_conn.disconnect.assert_called_once_with(mock.sentinel.force)
def test_detach_error_detach(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
mock_conn.detach.side_effect = exception.NotFound
vol.local_attach = mock_conn
self.assertRaises(exception.NotFound,
vol.detach,
False, mock.sentinel.ignore_errors)
mock_conn.detach.assert_called_once_with(False,
mock.sentinel.ignore_errors,
mock.ANY)
mock_conn.disconnect.assert_not_called()
def test_detach_error_disconnect(self):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
mock_conn.disconnect.side_effect = exception.NotFound
vol.local_attach = mock_conn
self.assertRaises(objects.brick_exception.ExceptionChainer,
vol.detach,
mock.sentinel.force, False)
mock_conn.detach.assert_called_once_with(mock.sentinel.force,
False,
mock.ANY)
mock_conn.disconnect.assert_called_once_with(mock.sentinel.force)
@mock.patch('cinderlib.objects.Connection.connect')
def test_connect(self, mock_connect):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_connect.return_value._ovo = objects.cinder_objs.VolumeAttachment()
mock_export = self.backend.driver.create_export
mock_export.return_value = None
res = vol.connect(mock.sentinel.conn_dict)
mock_connect.assert_called_once_with(vol, mock.sentinel.conn_dict)
self.assertEqual([res], vol.connections)
self.assertEqual([res._ovo], vol._ovo.volume_attachment.objects)
self.assertEqual('in-use', vol.status)
self.persistence.set_volume.assert_called_once_with(vol)
@mock.patch('cinderlib.objects.Volume._remove_export')
@mock.patch('cinderlib.objects.Connection.connect')
def test_connect_error(self, mock_connect, mock_remove_export):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_export = self.backend.driver.create_export
mock_export.return_value = None
mock_connect.side_effect = exception.NotFound
self.assertRaises(exception.NotFound,
vol.connect, mock.sentinel.conn_dict)
mock_connect.assert_called_once_with(vol, mock.sentinel.conn_dict)
self.assertEqual('available', vol.status)
self.persistence.set_volume.assert_not_called()
mock_remove_export.assert_called_once_with()
@mock.patch('cinderlib.objects.Volume._disconnect')
def test_disconnect(self, mock_disconnect):
vol = objects.Volume(self.backend_name, status='available', size=10)
mock_conn = mock.Mock()
vol.disconnect(mock_conn, mock.sentinel.force)
mock_conn._disconnect.assert_called_once_with(mock.sentinel.force)
mock_disconnect.assert_called_once_with(mock_conn)
@mock.patch('cinderlib.objects.Volume._connection_removed')
@mock.patch('cinderlib.objects.Volume._remove_export')
def test__disconnect(self, mock_remove_export, mock_conn_removed):
vol = objects.Volume(self.backend_name, status='in-use', size=10)
vol._disconnect(mock.sentinel.connection)
mock_remove_export.assert_called_once_with()
mock_conn_removed.assert_called_once_with(mock.sentinel.connection)
self.assertEqual('available', vol.status)
self.persistence.set_volume.assert_called_once_with(vol)
def test__remove_export(self):
vol = objects.Volume(self.backend_name, status='in-use', size=10)
vol._remove_export()
self.backend.driver.remove_export.assert_called_once_with(vol._context,
vol._ovo)
@mock.patch('cinderlib.objects.Volume._remove_export')
def test_cleanup(self, mock_remove_export):
vol = objects.Volume(self.backend_name, status='in-use', size=10)
connections = [mock.Mock(), mock.Mock()]
vol._connections = connections
vol.cleanup()
mock_remove_export.assert_called_once_with()
for c in connections:
c.detach.asssert_called_once_with()
def test__snapshot_removed_not_loaded(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
vol._snapshots = None
snap = objects.Snapshot(vol)
# Just check it doesn't break
vol._snapshot_removed(snap)
def test__snapshot_removed_not_present(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
snap = objects.Snapshot(vol)
snap2 = objects.Snapshot(vol)
vol._snapshots = [snap2]
vol._ovo.snapshots.objects = [snap2._ovo]
# Just check it doesn't break or remove any other snaps
vol._snapshot_removed(snap)
self.assertEqual([snap2], vol._snapshots)
self.assertEqual([snap2._ovo], vol._ovo.snapshots.objects)
def test__snapshot_removed(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
snap = objects.Snapshot(vol)
snap2 = objects.Snapshot(vol)
snap_other_instance = objects.Snapshot(vol, id=snap.id,
description='d')
snap_other_instance2 = objects.Snapshot(vol, id=snap.id,
description='e')
vol._snapshots = [snap2, snap_other_instance]
vol._ovo.snapshots.objects = [snap2._ovo, snap_other_instance2._ovo]
# Just check it doesn't break or remove any other snaps
vol._snapshot_removed(snap)
self.assertEqual([snap2], vol._snapshots)
self.assertEqual([snap2._ovo], vol._ovo.snapshots.objects)
def test__connection_removed_not_loaded(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
vol._connections = None
conn = objects.Connection(self.backend, connection_info={'conn': {}})
# Just check it doesn't break
vol._connection_removed(conn)
def test__connection_removed_not_present(self):
vol = objects.Volume(self.backend,
name='vol_name', description='vol_desc', size=10)
conn = objects.Connection(self.backend, connection_info={'conn': {}})
conn2 = objects.Connection(self.backend, connection_info={'conn': {}})
vol._connections = [conn2]
vol._ovo.volume_attachment.objects = [conn2._ovo]
# Just check it doesn't break or remove any other snaps
vol._connection_removed(conn)
self.assertEqual([conn2], vol._connections)
self.assertEqual([conn2._ovo], vol._ovo.volume_attachment.objects)
def test__connection_removed(self):
vol = objects.Volume(self.backend, size=10)
conn = objects.Connection(self.backend, connection_info={'conn': {}})
conn2 = objects.Connection(self.backend, connection_info={'conn': {}})
conn_other_instance = objects.Connection(self.backend, id=conn.id,
connection_info={'conn': {}})
conn_other_instance2 = objects.Connection(self.backend, id=conn.id,
connection_info={'conn': {}})
vol._connections = [conn2, conn_other_instance]
vol._ovo.volume_attachment.objects = [conn2._ovo,
conn_other_instance2._ovo]
# Just check it doesn't break or remove any other snaps
vol._connection_removed(conn)
self.assertEqual([conn2], vol._connections)
self.assertEqual([conn2._ovo], vol._ovo.volume_attachment.objects)
|
class TestVolume(base.BaseTest):
def test_init_from_args_backend_name(self):
pass
def test_init_from_args_backend_name(self):
pass
def test_init_from_volume(self):
pass
def test_init_from_ovo(self):
pass
def test_snapshots_lazy_loading(self):
pass
def test_connections_lazy_loading(self):
pass
def test_get_by_id(self):
pass
def test_get_by_id_not_found(self):
pass
def test_get_by_name(self):
pass
def test_create(self):
pass
def test_create_error(self):
pass
def test_delete(self):
pass
def test_delete_error_with_snaps(self):
pass
def test_delete_error_with_snaps(self):
pass
def test_extend(self):
pass
def test_extend_error(self):
pass
def test_clone(self):
pass
def test_clone_error(self):
pass
def test_create_snapshot(self):
pass
def test_create_snapshot_error(self):
pass
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach(self, mock_connect, mock_conn_props):
pass
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach_error_connect(self, mock_connect, mock_conn_props):
pass
@mock.patch('cinderlib.objects.Volume.disconnect')
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinderlib.objects.Volume.connect')
def test_attach_error_attach(self, mock_connect, mock_conn_props,
mock_disconnect):
pass
def test_detach_not_local(self):
pass
def test_detach_not_local(self):
pass
def test_detach_error_detach(self):
pass
def test_detach_error_disconnect(self):
pass
@mock.patch('cinderlib.objects.Connection.connect')
def test_connections_lazy_loading(self):
pass
@mock.patch('cinderlib.objects.Volume._remove_export')
@mock.patch('cinderlib.objects.Connection.connect')
def test_connect_error(self, mock_connect, mock_remove_export):
pass
@mock.patch('cinderlib.objects.Volume._disconnect')
def test_disconnect(self, mock_disconnect):
pass
@mock.patch('cinderlib.objects.Volume._connection_removed')
@mock.patch('cinderlib.objects.Volume._remove_export')
def test__disconnect(self, mock_remove_export, mock_conn_removed):
pass
def test__remove_export(self):
pass
@mock.patch('cinderlib.objects.Volume._remove_export')
def test_cleanup(self, mock_remove_export):
pass
def test__snapshot_removed_not_loaded(self):
pass
def test__snapshot_removed_not_present(self):
pass
def test__snapshot_removed_not_loaded(self):
pass
def test__connection_removed_not_loaded(self):
pass
def test__connection_removed_not_present(self):
pass
def test__connection_removed_not_loaded(self):
pass
| 54 | 0 | 11 | 1 | 10 | 0 | 1 | 0.03 | 1 | 1 | 0 | 0 | 39 | 2 | 39 | 42 | 480 | 84 | 386 | 136 | 331 | 10 | 324 | 123 | 284 | 2 | 2 | 1 | 40 |
3,640 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/exception.py
|
cinderlib.exception.NotLocal
|
class NotLocal(Exception):
__msg = "Volume %s doesn't seem to be attached locally."
def __init__(self, name):
super(NotLocal, self).__init__(self.__msg % name)
|
class NotLocal(Exception):
def __init__(self, name):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 3 | 0 | 1 |
3,641 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/exception.py
|
cinderlib.exception.InvalidPersistence
|
class InvalidPersistence(Exception):
__msg = 'Invalid persistence storage: %s.'
def __init__(self, name):
super(InvalidPersistence, self).__init__(self.__msg % name)
|
class InvalidPersistence(Exception):
def __init__(self, name):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 11 | 5 | 1 | 4 | 3 | 2 | 0 | 4 | 3 | 2 | 1 | 3 | 0 | 1 |
3,642 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/cinderlib.py
|
cinderlib.cinderlib.Backend
|
class Backend(object):
"""Representation of a Cinder Driver.
User facing attributes are:
- __init__
- json
- jsons
- load
- stats
- create_volume
- global_setup
- validate_connector
"""
backends = {}
global_initialization = False
# Some drivers try access the DB directly for extra specs on creation.
# With this dictionary the DB class can get the necessary data
_volumes_inflight = {}
def __init__(self, volume_backend_name, **driver_cfg):
if not self.global_initialization:
self.global_setup()
driver_cfg['volume_backend_name'] = volume_backend_name
Backend.backends[volume_backend_name] = self
conf = self._set_backend_config(driver_cfg)
self.driver = importutils.import_object(
conf.volume_driver,
configuration=conf,
db=self.persistence.db,
host='%s@%s' % (cfg.CONF.host, volume_backend_name),
cluster_name=None, # We don't use cfg.CONF.cluster for now
active_backend_id=None) # No failover for now
self.driver.do_setup(objects.CONTEXT)
self.driver.check_for_setup_error()
self.driver.init_capabilities()
self.driver.set_throttle()
self.driver.set_initialized()
self._driver_cfg = driver_cfg
self._volumes = None
# Some drivers don't implement the caching correctly. Populate cache
# with data retrieved in init_capabilities.
stats = self.driver.capabilities.copy()
stats.pop('properties', None)
stats.pop('vendor_prefix', None)
self._stats = self._transform_legacy_stats(stats)
self._pool_names = tuple(pool['pool_name'] for pool in stats['pools'])
@property
def pool_names(self):
return self._pool_names
def __repr__(self):
return '<cinderlib.Backend %s>' % self.id
def __getattr__(self, name):
return getattr(self.driver, name)
@property
def id(self):
return self._driver_cfg['volume_backend_name']
@property
def volumes(self):
if self._volumes is None:
self._volumes = self.persistence.get_volumes(backend_name=self.id)
return self._volumes
def volumes_filtered(self, volume_id=None, volume_name=None):
return self.persistence.get_volumes(backend_name=self.id,
volume_id=volume_id,
volume_name=volume_name)
def _transform_legacy_stats(self, stats):
"""Convert legacy stats to new stats with pools key."""
# Fill pools for legacy driver reports
if stats and 'pools' not in stats:
pool = stats.copy()
pool['pool_name'] = self.id
for key in ('driver_version', 'shared_targets',
'sparse_copy_volume', 'storage_protocol',
'vendor_name', 'volume_backend_name'):
pool.pop(key, None)
stats['pools'] = [pool]
return stats
def stats(self, refresh=False):
# Some drivers don't implement the caching correctly, so we implement
# it ourselves.
if refresh:
stats = self.driver.get_volume_stats(refresh=refresh)
self._stats = self._transform_legacy_stats(stats)
return self._stats
def create_volume(self, size, name='', description='', bootable=False,
**kwargs):
vol = objects.Volume(self, size=size, name=name,
description=description, bootable=bootable,
**kwargs)
vol.create()
return vol
def _volume_removed(self, volume):
i, vol = cinderlib_utils.find_by_id(volume.id, self._volumes)
if vol:
del self._volumes[i]
@classmethod
def _start_creating_volume(cls, volume):
cls._volumes_inflight[volume.id] = volume
def _volume_created(self, volume):
if self._volumes is not None:
self._volumes.append(volume)
self._volumes_inflight.pop(volume.id, None)
def validate_connector(self, connector_dict):
"""Raise exception if missing info for volume's connect call."""
self.driver.validate_connector(connector_dict)
@classmethod
def set_persistence(cls, persistence_config):
if not hasattr(cls, 'project_id'):
raise Exception('set_persistence can only be called after '
'cinderlib has been configured')
cls.persistence = persistence.setup(persistence_config)
objects.setup(cls.persistence, Backend, cls.project_id, cls.user_id,
cls.non_uuid_ids)
for backend in cls.backends.values():
backend.driver.db = cls.persistence.db
# Replace the standard DB implementation instance with the one from
# the persistence plugin.
db_api.IMPL = cls.persistence.db
# NOTE(geguileo): Staticmethod used instead of classmethod to make it work
# on Python3 when assigning the unbound method.
@staticmethod
def _config_parse(self):
"""Replacer oslo_config.cfg.ConfigParser.parse for in-memory cfg."""
res = super(cfg.ConfigParser, self).parse(Backend._config_string_io)
return res
@classmethod
def _update_cinder_config(cls):
"""Parse in-memory file to update OSLO configuration used by Cinder."""
cls._config_string_io.seek(0)
cls._parser.write(cls._config_string_io)
# Check if we have any multiopt
cls._config_string_io.seek(0)
current_cfg = cls._config_string_io.read()
if '\n\t' in current_cfg:
cls._config_string_io.seek(0)
cls._config_string_io.write(current_cfg.replace('\n\t', '\n'))
cls._config_string_io.seek(0)
cfg.CONF.reload_config_files()
@classmethod
def _set_cinder_config(cls, host, locks_path, cinder_config_params):
"""Setup the parser with all the known Cinder configuration."""
cfg.CONF.set_default('state_path', os.getcwd())
cfg.CONF.set_default('lock_path', '$state_path', 'oslo_concurrency')
cls._parser = six.moves.configparser.SafeConfigParser()
cls._parser.set('DEFAULT', 'enabled_backends', '')
if locks_path:
cls._parser.add_section('oslo_concurrency')
cls._parser.set('oslo_concurrency', 'lock_path', locks_path)
cls._parser.add_section('coordination')
cls._parser.set('coordination',
'backend_url',
'file://' + locks_path)
if host:
cls._parser.set('DEFAULT', 'host', host)
# All other configuration options go into the DEFAULT section
cls.__set_parser_kv(cinder_config_params, 'DEFAULT')
# We replace the OSLO's default parser to read from a StringIO instead
# of reading from a file.
cls._config_string_io = six.moves.StringIO()
cfg.ConfigParser.parse = six.create_unbound_method(cls._config_parse,
cfg.ConfigParser)
# Replace command line arg parser so we ignore caller's args
cfg._CachedArgumentParser.parse_args = lambda *a, **kw: None
# Update the configuration with the options we have configured
cfg.CONF(project='cinder', version=cinderlib.__version__,
default_config_files=['in_memory_file'])
cls._update_cinder_config()
@classmethod
def __set_parser_kv(cls, kvs, section):
for key, val in kvs.items():
# We receive list or tuple for multiopt and ConfigParser doesn't
# support repeating the same entry multiple times, so we hack our
# way around it
if isinstance(val, (list, tuple)):
if not val:
val = ''
elif len(val) == 1:
val = val[0]
else:
val = (('%s\n' % val[0]) +
'\n'.join('%s = %s' % (key, v) for v in val[1:]))
if not isinstance(val, six.string_types):
val = six.text_type(val)
cls._parser.set(section, key, val)
def _set_backend_config(self, driver_cfg):
backend_name = driver_cfg['volume_backend_name']
self._parser.add_section(backend_name)
self.__set_parser_kv(driver_cfg, backend_name)
self._parser.set('DEFAULT', 'enabled_backends',
','.join(self.backends.keys()))
self._update_cinder_config()
config = configuration.Configuration(manager.volume_backend_opts,
config_group=backend_name)
return config
@classmethod
def global_setup(cls, file_locks_path=None, root_helper='sudo',
suppress_requests_ssl_warnings=True, disable_logs=True,
non_uuid_ids=False, output_all_backend_info=False,
project_id=None, user_id=None, persistence_config=None,
fail_on_missing_backend=True, host=None,
**cinder_config_params):
# Global setup can only be set once
if cls.global_initialization:
raise Exception('Already setup')
cls.fail_on_missing_backend = fail_on_missing_backend
cls.root_helper = root_helper
cls.project_id = project_id
cls.user_id = user_id
cls.non_uuid_ids = non_uuid_ids
cls.set_persistence(persistence_config)
cls._set_cinder_config(host, file_locks_path, cinder_config_params)
serialization.setup(cls)
cls._set_logging(disable_logs)
cls._set_priv_helper(root_helper)
coordination.COORDINATOR.start()
if suppress_requests_ssl_warnings:
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
urllib3.disable_warnings(
urllib3.exceptions.InsecurePlatformWarning)
cls.global_initialization = True
cls.output_all_backend_info = output_all_backend_info
@classmethod
def _set_logging(cls, disable_logs):
if disable_logs:
logging.Logger.disabled = property(lambda s: True,
lambda s, x: None)
return
oslo_logging.setup(cfg.CONF, 'cinder')
logging.captureWarnings(True)
@classmethod
def _set_priv_helper(cls, root_helper):
utils.get_root_helper = lambda: root_helper
nos_brick.init(root_helper)
@property
def config(self):
if self.output_all_backend_info:
return self._driver_cfg
return {'volume_backend_name': self._driver_cfg['volume_backend_name']}
def _serialize(self, property_name):
result = [getattr(volume, property_name) for volume in self.volumes]
# We only need to output the full backend configuration once
if self.output_all_backend_info:
backend = {'volume_backend_name': self.id}
for volume in result:
volume['backend'] = backend
return {'class': type(self).__name__,
'backend': self.config,
'volumes': result}
@property
def json(self):
return self._serialize('json')
@property
def dump(self):
return self._serialize('dump')
@property
def jsons(self):
return json_lib.dumps(self.json)
@property
def dumps(self):
return json_lib.dumps(self.dump)
@classmethod
def load(cls, json_src, save=False):
backend = Backend.load_backend(json_src['backend'])
volumes = json_src.get('volumes')
if volumes:
backend._volumes = [objects.Volume.load(v, save) for v in volumes]
return backend
@classmethod
def load_backend(cls, backend_data):
backend_name = backend_data['volume_backend_name']
if backend_name in cls.backends:
return cls.backends[backend_name]
if len(backend_data) > 1:
return cls(**backend_data)
if cls.fail_on_missing_backend:
raise Exception('Backend not present in system or json.')
return backend_name
def refresh(self):
if self._volumes is not None:
self._volumes = None
self.volumes
@staticmethod
def list_supported_drivers():
"""Returns dictionary with driver classes names as keys."""
def convert_oslo_config(oslo_options):
options = []
for opt in oslo_options:
tmp_dict = {k: str(v) for k, v in vars(opt).items()
if not k.startswith('_')}
options.append(tmp_dict)
return options
def list_drivers(queue):
cwd = os.getcwd()
# Go to the parent directory directory where Cinder is installed
os.chdir(utils.__file__.rsplit(os.sep, 2)[0])
try:
drivers = cinder_interface_util.get_volume_drivers()
mapping = {d.class_name: vars(d) for d in drivers}
# Drivers contain class instances which are not serializable
for driver in mapping.values():
driver.pop('cls', None)
if 'driver_options' in driver:
driver['driver_options'] = convert_oslo_config(
driver['driver_options'])
finally:
os.chdir(cwd)
queue.put(mapping)
# Use a different process to avoid having all driver classes loaded in
# memory during our execution.
queue = multiprocessing.Queue()
p = multiprocessing.Process(target=list_drivers, args=(queue,))
p.start()
result = queue.get()
p.join()
return result
|
class Backend(object):
'''Representation of a Cinder Driver.
User facing attributes are:
- __init__
- json
- jsons
- load
- stats
- create_volume
- global_setup
- validate_connector
'''
def __init__(self, volume_backend_name, **driver_cfg):
pass
@property
def pool_names(self):
pass
def __repr__(self):
pass
def __getattr__(self, name):
pass
@property
def id(self):
pass
@property
def volumes(self):
pass
def volumes_filtered(self, volume_id=None, volume_name=None):
pass
def _transform_legacy_stats(self, stats):
'''Convert legacy stats to new stats with pools key.'''
pass
def stats(self, refresh=False):
pass
def create_volume(self, size, name='', description='', bootable=False,
**kwargs):
pass
def _volume_removed(self, volume):
pass
@classmethod
def _start_creating_volume(cls, volume):
pass
def _volume_created(self, volume):
pass
def validate_connector(self, connector_dict):
'''Raise exception if missing info for volume's connect call.'''
pass
@classmethod
def set_persistence(cls, persistence_config):
pass
@staticmethod
def _config_parse(self):
'''Replacer oslo_config.cfg.ConfigParser.parse for in-memory cfg.'''
pass
@classmethod
def _update_cinder_config(cls):
'''Parse in-memory file to update OSLO configuration used by Cinder.'''
pass
@classmethod
def _set_cinder_config(cls, host, locks_path, cinder_config_params):
'''Setup the parser with all the known Cinder configuration.'''
pass
@classmethod
def __set_parser_kv(cls, kvs, section):
pass
def _set_backend_config(self, driver_cfg):
pass
@classmethod
def global_setup(cls, file_locks_path=None, root_helper='sudo',
suppress_requests_ssl_warnings=True, disable_logs=True,
non_uuid_ids=False, output_all_backend_info=False,
project_id=None, user_id=None, persistence_config=None,
fail_on_missing_backend=True, host=None,
**cinder_config_params):
pass
@classmethod
def _set_logging(cls, disable_logs):
pass
@classmethod
def _set_priv_helper(cls, root_helper):
pass
@property
def config(self):
pass
def _serialize(self, property_name):
pass
@property
def json(self):
pass
@property
def dump(self):
pass
@property
def jsons(self):
pass
@property
def dumps(self):
pass
@classmethod
def load(cls, json_src, save=False):
pass
@classmethod
def load_backend(cls, backend_data):
pass
def refresh(self):
pass
@staticmethod
def list_supported_drivers():
'''Returns dictionary with driver classes names as keys.'''
pass
def convert_oslo_config(oslo_options):
pass
def list_drivers(queue):
pass
| 56 | 7 | 9 | 1 | 8 | 1 | 2 | 0.17 | 1 | 10 | 0 | 1 | 21 | 5 | 33 | 33 | 376 | 63 | 270 | 98 | 208 | 45 | 214 | 72 | 178 | 6 | 1 | 3 | 66 |
3,643 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/tests/unit/utils.py
|
cinderlib.tests.unit.utils.FakeBackend
|
class FakeBackend(cinderlib.Backend):
def __init__(self, *args, **kwargs):
driver_name = kwargs.get('volume_backend_name', 'fake')
cinderlib.Backend.backends[driver_name] = self
self._driver_cfg = {'volume_backend_name': driver_name}
self.driver = mock.Mock()
self.driver.persistence = cinderlib.Backend.persistence
self._pool_names = (driver_name,)
self._volumes = []
|
class FakeBackend(cinderlib.Backend):
def __init__(self, *args, **kwargs):
pass
| 2 | 0 | 8 | 0 | 8 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 4 | 1 | 34 | 9 | 0 | 9 | 7 | 7 | 0 | 9 | 7 | 7 | 1 | 2 | 0 | 1 |
3,644 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/tests/unit/persistence/test_dbms.py
|
cinderlib.tests.unit.persistence.test_dbms.TestMemoryDBPersistence
|
class TestMemoryDBPersistence(TestDBPersistence):
PERSISTENCE_CFG = {'storage': 'memory_db'}
|
class TestMemoryDBPersistence(TestDBPersistence):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 69 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 4 | 0 | 0 |
3,645 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/tests/unit/persistence/test_dbms.py
|
cinderlib.tests.unit.persistence.test_dbms.TestDBPersistence
|
class TestDBPersistence(base.BasePersistenceTest):
CONNECTION = 'sqlite:///' + tempfile.NamedTemporaryFile().name
PERSISTENCE_CFG = {'storage': 'db',
'connection': CONNECTION}
def tearDown(self):
sqla_api.model_query(self.context, sqla_api.models.Snapshot).delete()
sqla_api.model_query(self.context,
sqla_api.models.VolumeAttachment).delete()
sqla_api.model_query(self.context,
sqla_api.models.Volume).delete()
sqla_api.get_session().query(dbms.KeyValue).delete()
super(TestDBPersistence, self).tearDown()
def test_db(self):
self.assertIsInstance(self.persistence.db,
oslo_db_api.DBAPI)
def test_set_volume(self):
res = sqla_api.volume_get_all(self.context)
self.assertListEqual([], res)
vol = cinderlib.Volume(self.backend, size=1, name='disk')
expected = {'availability_zone': vol.availability_zone,
'size': vol.size, 'name': vol.name}
self.persistence.set_volume(vol)
db_vol = sqla_api.volume_get(self.context, vol.id)
actual = {'availability_zone': db_vol.availability_zone,
'size': db_vol.size, 'name': db_vol.display_name}
self.assertDictEqual(expected, actual)
def test_set_snapshot(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
snap = cinderlib.Snapshot(vol, name='disk')
self.assertEqual(0, len(sqla_api.snapshot_get_all(self.context)))
self.persistence.set_snapshot(snap)
db_entries = sqla_api.snapshot_get_all(self.context)
self.assertEqual(1, len(db_entries))
ovo_snap = cinder_ovos.Snapshot(self.context)
ovo_snap._from_db_object(ovo_snap._context, ovo_snap, db_entries[0])
cl_snap = cinderlib.Snapshot(vol, __ovo=ovo_snap)
self.assertEqualObj(snap, cl_snap)
def test_set_connection(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
conn = cinderlib.Connection(self.backend, volume=vol, connector={},
connection_info={'conn': {'data': {}}})
self.assertEqual(0,
len(sqla_api.volume_attachment_get_all(self.context)))
self.persistence.set_connection(conn)
db_entries = sqla_api.volume_attachment_get_all(self.context)
self.assertEqual(1, len(db_entries))
ovo_conn = cinder_ovos.VolumeAttachment(self.context)
ovo_conn._from_db_object(ovo_conn._context, ovo_conn, db_entries[0])
cl_conn = cinderlib.Connection(vol.backend, volume=vol, __ovo=ovo_conn)
self.assertEqualObj(conn, cl_conn)
def test_set_key_values(self):
res = sqla_api.get_session().query(dbms.KeyValue).all()
self.assertListEqual([], res)
expected = [dbms.KeyValue(key='key', value='value')]
self.persistence.set_key_value(expected[0])
actual = sqla_api.get_session().query(dbms.KeyValue).all()
self.assertListEqualObj(expected, actual)
|
class TestDBPersistence(base.BasePersistenceTest):
def tearDown(self):
pass
def test_db(self):
pass
def test_set_volume(self):
pass
def test_set_snapshot(self):
pass
def test_set_connection(self):
pass
def test_set_key_values(self):
pass
| 7 | 0 | 12 | 3 | 9 | 0 | 1 | 0 | 1 | 2 | 1 | 1 | 6 | 1 | 6 | 69 | 79 | 22 | 57 | 27 | 50 | 0 | 49 | 27 | 42 | 1 | 3 | 0 | 6 |
3,646 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/tests/unit/base.py
|
cinderlib.tests.unit.base.BaseTest
|
class BaseTest(unittest2.TestCase):
PERSISTENCE_CFG = None
def setUp(self):
if not self.PERSISTENCE_CFG:
cfg = {'storage': utils.get_mock_persistence()}
cinderlib.Backend.set_persistence(cfg)
self.backend_name = 'fake_backend'
self.backend = utils.FakeBackend(volume_backend_name=self.backend_name)
self.persistence = self.backend.persistence
cinderlib.Backend._volumes_inflight = {}
def tearDown(self):
# Clear all existing backends
cinderlib.Backend.backends = {}
def patch(self, path, *args, **kwargs):
"""Use python mock to mock a path with automatic cleanup."""
patcher = mock.patch(path, *args, **kwargs)
result = patcher.start()
self.addCleanup(patcher.stop)
return result
|
class BaseTest(unittest2.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def patch(self, path, *args, **kwargs):
'''Use python mock to mock a path with automatic cleanup.'''
pass
| 4 | 1 | 6 | 0 | 5 | 1 | 1 | 0.12 | 1 | 2 | 2 | 6 | 3 | 3 | 3 | 3 | 22 | 3 | 17 | 11 | 13 | 2 | 17 | 11 | 13 | 2 | 1 | 1 | 4 |
3,647 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/tests/functional/test_basic.py
|
cinderlib.tests.functional.test_basic.BackendFunctBasic
|
class BackendFunctBasic(base_tests.BaseFunctTestCase):
def test_stats(self):
stats = self.backend.stats()
self.assertIn('vendor_name', stats)
self.assertIn('volume_backend_name', stats)
pools_info = self._pools_info(stats)
for pool_info in pools_info:
self.assertIn('free_capacity_gb', pool_info)
self.assertIn('total_capacity_gb', pool_info)
def _volumes_in_pools(self, pools_info):
if not any('total_volumes' in p for p in pools_info):
return None
return sum(p.get('total_volumes', 0) for p in pools_info)
def test_stats_with_creation(self):
# This test can fail if we are don't have exclusive usage of the
# storage pool used in the tests or if the specific driver does not
# return the right values in allocated_capacity_gb or
# provisioned_capacity_gb.
initial_stats = self.backend.stats(refresh=True)
vol = self._create_vol(self.backend)
new_stats = self.backend.stats(refresh=True)
initial_pools_info = self._pools_info(initial_stats)
new_pools_info = self._pools_info(new_stats)
initial_volumes = self._volumes_in_pools(initial_pools_info)
new_volumes = self._volumes_in_pools(new_pools_info)
# If the backend is reporting the number of volumes, check them
if initial_volumes is not None:
self.assertEqual(initial_volumes + 1, new_volumes)
initial_size = sum(p.get('allocated_capacity_gb',
p.get('provisioned_capacity_gb', 0))
for p in initial_pools_info)
new_size = sum(p.get('allocated_capacity_gb',
p.get('provisioned_capacity_gb', vol.size))
for p in new_pools_info)
self.assertEqual(initial_size + vol.size, new_size)
def test_create_volume(self):
vol = self._create_vol(self.backend)
vol_size = self._get_vol_size(vol)
self.assertSize(vol.size, vol_size)
# We are not testing delete, so leave the deletion to the tearDown
def test_create_delete_volume(self):
vol = self._create_vol(self.backend)
vol.delete()
self.assertEqual('deleted', vol.status)
self.assertTrue(vol.deleted)
self.assertNotIn(vol, self.backend.volumes)
# Confirm idempotency of the operation by deleting it again
vol._ovo.status = 'error'
vol._ovo.deleted = False
vol.delete()
self.assertEqual('deleted', vol.status)
self.assertTrue(vol.deleted)
def test_create_snapshot(self):
vol = self._create_vol(self.backend)
self._create_snap(vol)
# We are not testing delete, so leave the deletion to the tearDown
def test_create_delete_snapshot(self):
vol = self._create_vol(self.backend)
snap = self._create_snap(vol)
snap.delete()
self.assertEqual('deleted', snap.status)
self.assertTrue(snap.deleted)
self.assertNotIn(snap, vol.snapshots)
# Confirm idempotency of the operation by deleting it again
snap._ovo.status = 'error'
snap._ovo.deleted = False
snap.delete()
self.assertEqual('deleted', snap.status)
self.assertTrue(snap.deleted)
def test_attach_volume(self):
vol = self._create_vol(self.backend)
attach = vol.attach()
path = attach.path
self.assertIs(attach, vol.local_attach)
self.assertIn(attach, vol.connections)
self.assertTrue(os.path.exists(path))
# We are not testing detach, so leave it to the tearDown
def test_attach_detach_volume(self):
vol = self._create_vol(self.backend)
attach = vol.attach()
self.assertIs(attach, vol.local_attach)
self.assertIn(attach, vol.connections)
vol.detach()
self.assertIsNone(vol.local_attach)
self.assertNotIn(attach, vol.connections)
def test_attach_detach_volume_via_attachment(self):
vol = self._create_vol(self.backend)
attach = vol.attach()
self.assertTrue(attach.attached)
path = attach.path
self.assertTrue(os.path.exists(path))
attach.detach()
self.assertFalse(attach.attached)
self.assertIsNone(vol.local_attach)
# We haven't disconnected the volume, just detached it
self.assertIn(attach, vol.connections)
attach.disconnect()
self.assertNotIn(attach, vol.connections)
def test_disk_io(self):
vol = self._create_vol(self.backend)
data = self._write_data(vol)
read_data = self._read_data(vol, len(data))
self.assertEqual(data, read_data)
def test_extend(self):
vol = self._create_vol(self.backend)
original_size = vol.size
result_original_size = self._get_vol_size(vol)
self.assertSize(original_size, result_original_size)
new_size = vol.size + 1
vol.extend(new_size)
self.assertEqual(new_size, vol.size)
result_new_size = self._get_vol_size(vol)
self.assertSize(new_size, result_new_size)
def test_clone(self):
vol = self._create_vol(self.backend)
original_size = self._get_vol_size(vol, do_detach=False)
data = self._write_data(vol)
new_vol = vol.clone()
self.assertEqual(vol.size, new_vol.size)
cloned_size = self._get_vol_size(new_vol, do_detach=False)
read_data = self._read_data(new_vol, len(data))
self.assertEqual(original_size, cloned_size)
self.assertEqual(data, read_data)
def test_create_volume_from_snapshot(self):
# Create a volume and write some data
vol = self._create_vol(self.backend)
original_size = self._get_vol_size(vol, do_detach=False)
data = self._write_data(vol)
# Take a snapshot
snap = vol.create_snapshot()
self.assertEqual(vol.size, snap.volume_size)
# Change the data in the volume
reversed_data = data[::-1]
self._write_data(vol, data=reversed_data)
# Create a new volume from the snapshot with the original data
new_vol = snap.create_volume()
self.assertEqual(vol.size, new_vol.size)
created_size = self._get_vol_size(new_vol, do_detach=False)
read_data = self._read_data(new_vol, len(data))
self.assertEqual(original_size, created_size)
self.assertEqual(data, read_data)
|
class BackendFunctBasic(base_tests.BaseFunctTestCase):
def test_stats(self):
pass
def _volumes_in_pools(self, pools_info):
pass
def test_stats_with_creation(self):
pass
def test_create_volume(self):
pass
def test_create_delete_volume(self):
pass
def test_create_snapshot(self):
pass
def test_create_delete_snapshot(self):
pass
def test_attach_volume(self):
pass
def test_attach_detach_volume(self):
pass
def test_attach_detach_volume_via_attachment(self):
pass
def test_disk_io(self):
pass
def test_extend(self):
pass
def test_clone(self):
pass
def test_create_volume_from_snapshot(self):
pass
| 15 | 0 | 12 | 2 | 9 | 1 | 1 | 0.12 | 1 | 0 | 0 | 0 | 14 | 0 | 14 | 26 | 183 | 42 | 126 | 63 | 111 | 15 | 122 | 63 | 107 | 2 | 2 | 1 | 17 |
3,648 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/tests/functional/base_tests.py
|
cinderlib.tests.functional.base_tests.BaseFunctTestCase
|
class BaseFunctTestCase(unittest2.TestCase):
FNULL = open(os.devnull, 'w')
CONFIG_FILE = os.environ.get('CL_FTEST_CFG', '/etc/cinder/cinder.conf')
PRECISION = os.environ.get('CL_FTEST_PRECISION', 0)
LOGGING_ENABLED = os.environ.get('CL_FTEST_LOGGING', False)
ROOT_HELPER = os.environ.get('CL_FTEST_ROOT_HELPER', 'sudo')
tests_config = None
@classmethod
def ensure_config_loaded(cls):
if not cls.tests_config:
# If it's a .conf type of configuration file convert it to dict
if cls.CONFIG_FILE.endswith('.conf'):
cls.tests_config = cinder_to_yaml.convert(cls.CONFIG_FILE)
else:
with open(cls.CONFIG_FILE, 'r') as f:
cls.tests_config = yaml.load(f)
cls.tests_config.setdefault('logs', cls.LOGGING_ENABLED)
cls.tests_config.setdefault('size_precision', cls.PRECISION)
return cls.tests_config
@staticmethod
def _replace_oslo_cli_parse():
original_cli_parser = cfg.ConfigOpts._parse_cli_opts
def _parse_cli_opts(self, args):
return original_cli_parser(self, [])
cfg.ConfigOpts._parse_cli_opts = six.create_unbound_method(
_parse_cli_opts, cfg.ConfigOpts)
@classmethod
def setUpClass(cls):
cls._replace_oslo_cli_parse()
config = cls.ensure_config_loaded()
# Use memory_db persistence instead of memory to ensure migrations work
cinderlib.setup(root_helper=cls.ROOT_HELPER,
disable_logs=not config['logs'],
persistence_config={'storage': 'memory_db'})
# Initialize backends
cls.backends = [cinderlib.Backend(**cfg) for cfg in
config['backends']]
# Lazy load backend's _volumes variable using the volumes property so
# new volumes are added to this list on successful creation.
for backend in cls.backends:
backend.volumes
# Set current backend, by default is the first
cls.backend = cls.backends[0]
cls.size_precision = config['size_precision']
@classmethod
def tearDownClass(cls):
errors = []
# Do the cleanup of the resources the tests haven't cleaned up already
for backend in cls.backends:
# For each of the volumes that haven't been deleted delete the
# snapshots that are still there and then the volume.
# NOTE(geguileo): Don't use volumes and snapshots iterables since
# they are modified when deleting.
# NOTE(geguileo): Cleanup in reverse because RBD driver cannot
# delete a snapshot that has a volume created from it.
for vol in list(backend.volumes)[::-1]:
for snap in list(vol.snapshots):
try:
snap.delete()
except Exception as exc:
errors.append('Error deleting snapshot %s from volume '
'%s: %s' % (snap.id, vol.id, exc))
# Detach if locally attached
if vol.local_attach:
try:
vol.detach()
except Exception as exc:
errors.append('Error detaching %s for volume %s %s: '
'%s' % (vol.local_attach.path, vol.id,
exc))
# Disconnect any existing connections
for conn in vol.connections:
try:
conn.disconnect()
except Exception as exc:
errors.append('Error disconnecting volume %s: %s' %
(vol.id, exc))
try:
vol.delete()
except Exception as exc:
errors.append('Error deleting volume %s: %s' %
(vol.id, exc))
if errors:
raise Exception('Errors on test cleanup: %s' % '\n\t'.join(errors))
def _root_execute(self, *args, **kwargs):
cmd = [self.ROOT_HELPER]
cmd.extend(args)
cmd.extend("%s=%s" % (k, v) for k, v in kwargs.items())
return subprocess.check_output(cmd, stderr=self.FNULL)
def _create_vol(self, backend=None, **kwargs):
if not backend:
backend = self.backend
vol_size = kwargs.setdefault('size', 1)
name = kwargs.setdefault('name', backend.id)
vol = backend.create_volume(**kwargs)
self.assertEqual('available', vol.status)
self.assertEqual(vol_size, vol.size)
self.assertEqual(name, vol.display_name)
self.assertIn(vol, backend.volumes)
return vol
def _create_snap(self, vol, **kwargs):
name = kwargs.setdefault('name', vol.id)
snap = vol.create_snapshot(name=vol.id)
self.assertEqual('available', snap.status)
self.assertEqual(vol.size, snap.volume_size)
self.assertEqual(name, snap.display_name)
self.assertIn(snap, vol.snapshots)
return snap
def _get_vol_size(self, vol, do_detach=True):
if not vol.local_attach:
vol.attach()
try:
while True:
try:
result = self._root_execute('lsblk', '-o', 'SIZE',
'-b', vol.local_attach.path)
size_bytes = result.split()[1]
return float(size_bytes) / 1024.0 / 1024.0 / 1024.0
# NOTE(geguileo): We can't catch subprocess.CalledProcessError
# because somehow we get an instance from a different
# subprocess.CalledProcessError class that isn't the same.
except Exception as exc:
# If the volume is not yet available
if getattr(exc, 'returncode', 0) != 32:
raise
finally:
if do_detach:
vol.detach()
def _write_data(self, vol, data=None, do_detach=True):
if not data:
data = b'0123456789' * 100
if not vol.local_attach:
vol.attach()
# TODO(geguileo: This will not work on Windows, for that we need to
# pass delete=False and do the manual deletion ourselves.
try:
with tempfile.NamedTemporaryFile() as f:
f.write(data)
f.flush()
self._root_execute('dd', 'if=' + f.name,
of=vol.local_attach.path)
finally:
if do_detach:
vol.detach()
return data
def _read_data(self, vol, length, do_detach=True):
if not vol.local_attach:
vol.attach()
try:
stdout = self._root_execute('dd', 'if=' + vol.local_attach.path,
count=1, ibs=length)
finally:
if do_detach:
vol.detach()
return stdout
def _pools_info(self, stats):
return stats.get('pools', [stats])
def assertSize(self, expected_size, actual_size):
if self.size_precision:
self.assertAlmostEqual(expected_size, actual_size,
self.size_precision)
else:
self.assertEqual(expected_size, actual_size)
|
class BaseFunctTestCase(unittest2.TestCase):
@classmethod
def ensure_config_loaded(cls):
pass
@staticmethod
def _replace_oslo_cli_parse():
pass
def _parse_cli_opts(self, args):
pass
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def _root_execute(self, *args, **kwargs):
pass
def _create_vol(self, backend=None, **kwargs):
pass
def _create_snap(self, vol, **kwargs):
pass
def _get_vol_size(self, vol, do_detach=True):
pass
def _write_data(self, vol, data=None, do_detach=True):
pass
def _read_data(self, vol, length, do_detach=True):
pass
def _pools_info(self, stats):
pass
def assertSize(self, expected_size, actual_size):
pass
| 18 | 0 | 13 | 1 | 10 | 2 | 3 | 0.15 | 1 | 4 | 1 | 1 | 8 | 0 | 12 | 12 | 191 | 28 | 142 | 46 | 124 | 21 | 120 | 37 | 106 | 11 | 1 | 4 | 38 |
3,649 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/persistence/memory.py
|
cinderlib.persistence.memory.MemoryPersistence
|
class MemoryPersistence(persistence_base.PersistenceDriverBase):
volumes = {}
snapshots = {}
connections = {}
key_values = {}
def __init__(self):
# Create fake DB for drivers
self.fake_db = persistence_base.DB(self)
super(MemoryPersistence, self).__init__()
@property
def db(self):
return self.fake_db
@staticmethod
def _get_field(res, field):
res = getattr(res, field)
if field == 'host':
res = res.split('@')[1].split('#')[0]
return res
def _filter_by(self, values, field, value):
if not value:
return values
return [res for res in values if self._get_field(res, field) == value]
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
try:
res = ([self.volumes[volume_id]] if volume_id
else self.volumes.values())
except KeyError:
return []
res = self._filter_by(res, 'display_name', volume_name)
res = self._filter_by(res, 'host', backend_name)
return res
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
try:
result = ([self.snapshots[snapshot_id]] if snapshot_id
else self.snapshots.values())
except KeyError:
return []
result = self._filter_by(result, 'volume_id', volume_id)
result = self._filter_by(result, 'display_name', snapshot_name)
return result
def get_connections(self, connection_id=None, volume_id=None):
try:
result = ([self.connections[connection_id]] if connection_id
else self.connections.values())
except KeyError:
return []
result = self._filter_by(result, 'volume_id', volume_id)
return result
def get_key_values(self, key=None):
try:
result = ([self.key_values[key]] if key
else list(self.key_values.values()))
except KeyError:
return []
return result
def set_volume(self, volume):
self.volumes[volume.id] = volume
super(MemoryPersistence, self).set_volume(volume)
def set_snapshot(self, snapshot):
self.snapshots[snapshot.id] = snapshot
super(MemoryPersistence, self).set_snapshot(snapshot)
def set_connection(self, connection):
self.connections[connection.id] = connection
super(MemoryPersistence, self).set_connection(connection)
def set_key_value(self, key_value):
self.key_values[key_value.key] = key_value
def delete_volume(self, volume):
self.volumes.pop(volume.id, None)
super(MemoryPersistence, self).delete_volume(volume)
def delete_snapshot(self, snapshot):
self.snapshots.pop(snapshot.id, None)
super(MemoryPersistence, self).delete_snapshot(snapshot)
def delete_connection(self, connection):
self.connections.pop(connection.id, None)
super(MemoryPersistence, self).delete_connection(connection)
def delete_key_value(self, key_value):
self.key_values.pop(key_value.key, None)
|
class MemoryPersistence(persistence_base.PersistenceDriverBase):
def __init__(self):
pass
@property
def db(self):
pass
@staticmethod
def _get_field(res, field):
pass
def _filter_by(self, values, field, value):
pass
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
pass
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
pass
def get_connections(self, connection_id=None, volume_id=None):
pass
def get_key_values(self, key=None):
pass
def set_volume(self, volume):
pass
def set_snapshot(self, snapshot):
pass
def set_connection(self, connection):
pass
def set_key_value(self, key_value):
pass
def delete_volume(self, volume):
pass
def delete_snapshot(self, snapshot):
pass
def delete_connection(self, connection):
pass
def delete_key_value(self, key_value):
pass
| 19 | 0 | 5 | 0 | 4 | 0 | 2 | 0.03 | 1 | 4 | 1 | 0 | 15 | 1 | 16 | 34 | 95 | 17 | 77 | 29 | 57 | 2 | 70 | 26 | 53 | 3 | 2 | 1 | 26 |
3,650 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/persistence/dbms.py
|
cinderlib.persistence.dbms.MemoryDBPersistence
|
class MemoryDBPersistence(DBPersistence):
def __init__(self):
super(MemoryDBPersistence, self).__init__(connection='sqlite://')
|
class MemoryDBPersistence(DBPersistence):
def __init__(self):
pass
| 2 | 0 | 2 | 0 | 2 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 40 | 3 | 0 | 3 | 2 | 1 | 0 | 3 | 2 | 1 | 1 | 3 | 0 | 1 |
3,651 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/persistence/dbms.py
|
cinderlib.persistence.dbms.KeyValue
|
class KeyValue(models.BASE, models.models.ModelBase, objects.KeyValue):
__tablename__ = 'cinderlib_persistence_key_value'
key = models.Column(models.String(255), primary_key=True)
value = models.Column(models.Text)
|
class KeyValue(models.BASE, models.models.ModelBase, objects.KeyValue):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0 | 4 | 4 | 3 | 0 | 4 | 4 | 3 | 0 | 1 | 0 | 0 |
3,652 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/persistence/dbms.py
|
cinderlib.persistence.dbms.DBPersistence
|
class DBPersistence(persistence_base.PersistenceDriverBase):
GET_METHODS_PER_DB_MODEL = {
cinder_objs.VolumeType.model: 'volume_type_get',
cinder_objs.QualityOfServiceSpecs.model: 'qos_specs_get',
}
def __init__(self, connection, sqlite_synchronous=True,
soft_deletes=False):
self.soft_deletes = soft_deletes
cfg.CONF.set_override('connection', connection, 'database')
cfg.CONF.set_override('sqlite_synchronous',
sqlite_synchronous,
'database')
# Suppress logging for migration
migrate_logger = logging.getLogger('migrate')
migrate_logger.setLevel(logging.WARNING)
self._clear_facade()
self.db_instance = db_api.oslo_db_api.DBAPI.from_config(
conf=cfg.CONF, backend_mapping=db_api._BACKEND_MAPPING,
lazy=True)
# We need to wrap some get methods that get called before the volume is
# actually created.
self.original_vol_type_get = self.db_instance.volume_type_get
self.db_instance.volume_type_get = self.vol_type_get
self.original_qos_specs_get = self.db_instance.qos_specs_get
self.db_instance.qos_specs_get = self.qos_specs_get
self.original_get_by_id = self.db_instance.get_by_id
self.db_instance.get_by_id = self.get_by_id
migration.db_sync()
self._create_key_value_table()
super(DBPersistence, self).__init__()
def vol_type_get(self, context, id, inactive=False,
expected_fields=None):
if id not in objects.Backend._volumes_inflight:
return self.original_vol_type_get(context, id, inactive)
vol = objects.Backend._volumes_inflight[id]._ovo
if not vol.volume_type_id:
return None
return persistence_base.vol_type_to_dict(vol.volume_type)
def qos_specs_get(self, context, qos_specs_id, inactive=False):
if qos_specs_id not in objects.Backend._volumes_inflight:
return self.original_qos_specs_get(context, qos_specs_id, inactive)
vol = objects.Backend._volumes_inflight[qos_specs_id]._ovo
if not vol.volume_type_id:
return None
return persistence_base.vol_type_to_dict(vol.volume_type)['qos_specs']
def get_by_id(self, context, model, id, *args, **kwargs):
if model not in self.GET_METHODS_PER_DB_MODEL:
return self.original_get_by_id(context, model, id, *args, **kwargs)
method = getattr(self, self.GET_METHODS_PER_DB_MODEL[model])
return method(context, id)
def _clear_facade(self):
# This is for Pike
if hasattr(sqla_api, '_FACADE'):
sqla_api._FACADE = None
# This is for Queens and Rocky (untested)
elif hasattr(sqla_api, 'configure'):
sqla_api.configure(cfg.CONF)
def _create_key_value_table(self):
models.BASE.metadata.create_all(sqla_api.get_engine(),
tables=[KeyValue.__table__])
@property
def db(self):
return self.db_instance
@staticmethod
def _build_filter(**kwargs):
return {key: value for key, value in kwargs.items() if value}
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
# Use the % wildcard to ignore the host name on the backend_name search
host = '%@' + backend_name if backend_name else None
filters = self._build_filter(id=volume_id, display_name=volume_name,
host=host)
LOG.debug('get_volumes for %s', filters)
ovos = cinder_objs.VolumeList.get_all(objects.CONTEXT, filters=filters)
result = []
for ovo in ovos:
backend = ovo.host.split('@')[-1].split('#')[0]
# Trigger lazy loading of specs
if ovo.volume_type_id:
ovo.volume_type.extra_specs
ovo.volume_type.qos_specs
result.append(objects.Volume(backend, __ovo=ovo))
return result
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
filters = self._build_filter(id=snapshot_id, volume_id=volume_id,
display_name=snapshot_name)
LOG.debug('get_snapshots for %s', filters)
ovos = cinder_objs.SnapshotList.get_all(objects.CONTEXT,
filters=filters)
result = [objects.Snapshot(None, __ovo=ovo) for ovo in ovos.objects]
return result
def get_connections(self, connection_id=None, volume_id=None):
filters = self._build_filter(id=connection_id, volume_id=volume_id)
LOG.debug('get_connections for %s', filters)
ovos = cinder_objs.VolumeAttachmentList.get_all(objects.CONTEXT,
filters)
# Leverage lazy loading of the volume and backend in Connection
result = [objects.Connection(None, volume=None, __ovo=ovo)
for ovo in ovos.objects]
return result
def _get_kv(self, key=None, session=None):
session = session or sqla_api.get_session()
query = session.query(KeyValue)
if key is not None:
query = query.filter_by(key=key)
res = query.all()
# If we want to use the result as an ORM
if session:
return res
return [objects.KeyValue(r.key, r.value) for r in res]
def get_key_values(self, key=None):
return self._get_kv(key)
def set_volume(self, volume):
changed = self.get_changed_fields(volume)
if not changed:
changed = self.get_fields(volume)
extra_specs = changed.pop('extra_specs', None)
qos_specs = changed.pop('qos_specs', None)
# Since OVOs are not tracking QoS or Extra specs dictionary changes,
# we only support setting QoS or Extra specs on creation or add them
# later.
if changed.get('volume_type_id'):
vol_type_fields = {'id': volume.volume_type_id,
'name': volume.volume_type_id,
'extra_specs': extra_specs,
'is_public': True}
if qos_specs:
res = self.db.qos_specs_create(objects.CONTEXT,
{'name': volume.volume_type_id,
'consumer': 'back-end',
'specs': qos_specs})
# Cinder is automatically generating an ID, replace it
query = sqla_api.model_query(objects.CONTEXT,
models.QualityOfServiceSpecs)
query.filter_by(id=res['id']).update(
{'id': volume.volume_type.qos_specs_id})
self.db.volume_type_create(objects.CONTEXT, vol_type_fields)
else:
if extra_specs is not None:
self.db.volume_type_extra_specs_update_or_create(
objects.CONTEXT, volume.volume_type_id, extra_specs)
self.db.qos_specs_update(objects.CONTEXT,
volume.volume_type.qos_specs_id,
{'name': volume.volume_type_id,
'consumer': 'back-end',
'specs': qos_specs})
# Create the volume
if 'id' in changed:
LOG.debug('set_volume creating %s', changed)
try:
self.db.volume_create(objects.CONTEXT, changed)
changed = None
except exception.DBDuplicateEntry:
del changed['id']
if changed:
LOG.debug('set_volume updating %s', changed)
self.db.volume_update(objects.CONTEXT, volume.id, changed)
super(DBPersistence, self).set_volume(volume)
def set_snapshot(self, snapshot):
changed = self.get_changed_fields(snapshot)
if not changed:
changed = self.get_fields(snapshot)
# Create
if 'id' in changed:
LOG.debug('set_snapshot creating %s', changed)
try:
self.db.snapshot_create(objects.CONTEXT, changed)
changed = None
except exception.DBDuplicateEntry:
del changed['id']
if changed:
LOG.debug('set_snapshot updating %s', changed)
self.db.snapshot_update(objects.CONTEXT, snapshot.id, changed)
super(DBPersistence, self).set_snapshot(snapshot)
def set_connection(self, connection):
changed = self.get_changed_fields(connection)
if not changed:
changed = self.get_fields(connection)
if 'connection_info' in changed:
connection._convert_connection_info_to_db_format(changed)
if 'connector' in changed:
connection._convert_connector_to_db_format(changed)
# Create
if 'id' in changed:
LOG.debug('set_connection creating %s', changed)
try:
sqla_api.volume_attach(objects.CONTEXT, changed)
changed = None
except exception.DBDuplicateEntry:
del changed['id']
if changed:
LOG.debug('set_connection updating %s', changed)
self.db.volume_attachment_update(objects.CONTEXT, connection.id,
changed)
super(DBPersistence, self).set_connection(connection)
def set_key_value(self, key_value):
session = sqla_api.get_session()
with session.begin():
kv = self._get_kv(key_value.key, session)
kv = kv[0] if kv else KeyValue(key=key_value.key)
kv.value = key_value.value
session.add(kv)
def delete_volume(self, volume):
if self.soft_deletes:
LOG.debug('soft deleting volume %s', volume.id)
self.db.volume_destroy(objects.CONTEXT, volume.id)
if volume.volume_type_id:
LOG.debug('soft deleting volume type %s',
volume.volume_type_id)
self.db.volume_destroy(objects.CONTEXT, volume.volume_type_id)
if volume.volume_type.qos_specs_id:
self.db.qos_specs_delete(objects.CONTEXT,
volume.volume_type.qos_specs_id)
else:
LOG.debug('hard deleting volume %s', volume.id)
query = sqla_api.model_query(objects.CONTEXT, models.Volume)
query.filter_by(id=volume.id).delete()
if volume.volume_type_id:
LOG.debug('hard deleting volume type %s',
volume.volume_type_id)
query = sqla_api.model_query(objects.CONTEXT,
models.VolumeTypeExtraSpecs)
query.filter_by(volume_type_id=volume.volume_type_id).delete()
query = sqla_api.model_query(objects.CONTEXT,
models.VolumeType)
query.filter_by(id=volume.volume_type_id).delete()
query = sqla_api.model_query(objects.CONTEXT,
models.QualityOfServiceSpecs)
qos_id = volume.volume_type.qos_specs_id
if qos_id:
query.filter(sqla_api.or_(
models.QualityOfServiceSpecs.id == qos_id,
models.QualityOfServiceSpecs.specs_id == qos_id
)).delete()
super(DBPersistence, self).delete_volume(volume)
def delete_snapshot(self, snapshot):
if self.soft_deletes:
LOG.debug('soft deleting snapshot %s', snapshot.id)
self.db.snapshot_destroy(objects.CONTEXT, snapshot.id)
else:
LOG.debug('hard deleting snapshot %s', snapshot.id)
query = sqla_api.model_query(objects.CONTEXT, models.Snapshot)
query.filter_by(id=snapshot.id).delete()
super(DBPersistence, self).delete_snapshot(snapshot)
def delete_connection(self, connection):
if self.soft_deletes:
LOG.debug('soft deleting connection %s', connection.id)
self.db.attachment_destroy(objects.CONTEXT, connection.id)
else:
LOG.debug('hard deleting connection %s', connection.id)
query = sqla_api.model_query(objects.CONTEXT,
models.VolumeAttachment)
query.filter_by(id=connection.id).delete()
super(DBPersistence, self).delete_connection(connection)
def delete_key_value(self, key_value):
query = sqla_api.get_session().query(KeyValue)
query.filter_by(key=key_value.key).delete()
|
class DBPersistence(persistence_base.PersistenceDriverBase):
def __init__(self, connection, sqlite_synchronous=True,
soft_deletes=False):
pass
def vol_type_get(self, context, id, inactive=False,
expected_fields=None):
pass
def qos_specs_get(self, context, qos_specs_id, inactive=False):
pass
def get_by_id(self, context, model, id, *args, **kwargs):
pass
def _clear_facade(self):
pass
def _create_key_value_table(self):
pass
@property
def db(self):
pass
@staticmethod
def _build_filter(**kwargs):
pass
def get_volumes(self, volume_id=None, volume_name=None, backend_name=None):
pass
def get_snapshots(self, snapshot_id=None, snapshot_name=None,
volume_id=None):
pass
def get_connections(self, connection_id=None, volume_id=None):
pass
def _get_kv(self, key=None, session=None):
pass
def get_key_values(self, key=None):
pass
def set_volume(self, volume):
pass
def set_snapshot(self, snapshot):
pass
def set_connection(self, connection):
pass
def set_key_value(self, key_value):
pass
def delete_volume(self, volume):
pass
def delete_snapshot(self, snapshot):
pass
def delete_connection(self, connection):
pass
def delete_key_value(self, key_value):
pass
| 24 | 0 | 13 | 1 | 11 | 1 | 3 | 0.07 | 1 | 2 | 1 | 1 | 20 | 5 | 21 | 39 | 301 | 44 | 241 | 66 | 214 | 17 | 194 | 61 | 172 | 8 | 2 | 3 | 58 |
3,653 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/tests/unit/persistence/test_memory.py
|
cinderlib.tests.unit.persistence.test_memory.TestMemoryPersistence
|
class TestMemoryPersistence(base.BasePersistenceTest):
PERSISTENCE_CFG = {'storage': 'memory'}
def tearDown(self):
# Since this plugin uses class attributes we have to clear them
self.persistence.volumes = {}
self.persistence.snapshots = {}
self.persistence.connections = {}
self.persistence.key_values = {}
super(TestMemoryPersistence, self).tearDown()
def test_db(self):
self.assertIsInstance(self.persistence.db,
cinderlib.persistence.base.DB)
def test_set_volume(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
self.assertDictEqual({}, self.persistence.volumes)
self.persistence.set_volume(vol)
self.assertDictEqual({vol.id: vol}, self.persistence.volumes)
def test_set_snapshot(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
snap = cinderlib.Snapshot(vol, name='disk')
self.assertDictEqual({}, self.persistence.snapshots)
self.persistence.set_snapshot(snap)
self.assertDictEqual({snap.id: snap}, self.persistence.snapshots)
def test_set_connection(self):
vol = cinderlib.Volume(self.backend, size=1, name='disk')
conn = cinderlib.Connection(self.backend, volume=vol, connector={},
connection_info={'conn': {'data': {}}})
self.assertDictEqual({}, self.persistence.connections)
self.persistence.set_connection(conn)
self.assertDictEqual({conn.id: conn}, self.persistence.connections)
def test_set_key_values(self):
self.assertDictEqual({}, self.persistence.key_values)
expected = [cinderlib.KeyValue('key', 'value')]
self.persistence.set_key_value(expected[0])
self.assertTrue('key' in self.persistence.key_values)
self.assertEqual(expected, list(self.persistence.key_values.values()))
|
class TestMemoryPersistence(base.BasePersistenceTest):
def tearDown(self):
pass
def test_db(self):
pass
def test_set_volume(self):
pass
def test_set_snapshot(self):
pass
def test_set_connection(self):
pass
def test_set_key_values(self):
pass
| 7 | 0 | 7 | 1 | 6 | 0 | 1 | 0.03 | 1 | 2 | 0 | 0 | 6 | 1 | 6 | 69 | 47 | 11 | 35 | 14 | 28 | 1 | 33 | 14 | 26 | 1 | 3 | 0 | 6 |
3,654 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/persistence/__init__.py
|
cinderlib.persistence.MyDict
|
class MyDict(dict):
"""Custom non clearable dictionary.
Required to overcome the nature of oslo.config where configuration comes
from files and command line input.
Using this dictionary we can load from memory everything and it won't clear
things when we dynamically load a driver and the driver has new
configuration options.
"""
def clear(self):
pass
|
class MyDict(dict):
'''Custom non clearable dictionary.
Required to overcome the nature of oslo.config where configuration comes
from files and command line input.
Using this dictionary we can load from memory everything and it won't clear
things when we dynamically load a driver and the driver has new
configuration options.
'''
def clear(self):
pass
| 2 | 1 | 2 | 0 | 2 | 0 | 1 | 2.33 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 28 | 12 | 2 | 3 | 2 | 1 | 7 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
3,655 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/persistence/base.py
|
cinderlib.persistence.base.DB
|
class DB(object):
"""Replacement for DB access methods.
This will serve as replacement for methods used by:
- Drivers
- OVOs' get_by_id and save methods
- DB implementation
Data will be retrieved using the persistence driver we setup.
"""
GET_METHODS_PER_DB_MODEL = {
objects.Volume.model: 'volume_get',
objects.VolumeType.model: 'volume_type_get',
objects.Snapshot.model: 'snapshot_get',
objects.QualityOfServiceSpecs.model: 'qos_specs_get',
}
def __init__(self, persistence_driver):
self.persistence = persistence_driver
# Replace get_by_id OVO methods with something that will return
# expected data
objects.Volume.get_by_id = self.volume_get
objects.Snapshot.get_by_id = self.snapshot_get
# Disable saving in OVOs
for ovo_name in cinder_base_ovo.CinderObjectRegistry.obj_classes():
ovo_cls = getattr(objects, ovo_name)
ovo_cls.save = lambda *args, **kwargs: None
def volume_get(self, context, volume_id, *args, **kwargs):
return self.persistence.get_volumes(volume_id)[0]._ovo
def snapshot_get(self, context, snapshot_id, *args, **kwargs):
return self.persistence.get_snapshots(snapshot_id)[0]._ovo
def volume_type_get(self, context, id, inactive=False,
expected_fields=None):
if id in cinderlib.Backend._volumes_inflight:
vol = cinderlib.Backend._volumes_inflight[id]
else:
vol = self.persistence.get_volumes(id)[0]
if not vol._ovo.volume_type_id:
return None
return vol_type_to_dict(vol._ovo.volume_type)
def qos_specs_get(self, context, qos_specs_id, inactive=False):
if qos_specs_id in cinderlib.Backend._volumes_inflight:
vol = cinderlib.Backend._volumes_inflight[qos_specs_id]
else:
vol = self.persistence.get_volumes(qos_specs_id)[0]
if not vol._ovo.volume_type_id:
return None
return vol_type_to_dict(vol._ovo.volume_type)['qos_specs']
@classmethod
def image_volume_cache_get_by_volume_id(cls, context, volume_id):
return None
def get_by_id(self, context, model, id, *args, **kwargs):
method = getattr(self, self.GET_METHODS_PER_DB_MODEL[model])
return method(context, id)
|
class DB(object):
'''Replacement for DB access methods.
This will serve as replacement for methods used by:
- Drivers
- OVOs' get_by_id and save methods
- DB implementation
Data will be retrieved using the persistence driver we setup.
'''
def __init__(self, persistence_driver):
pass
def volume_get(self, context, volume_id, *args, **kwargs):
pass
def snapshot_get(self, context, snapshot_id, *args, **kwargs):
pass
def volume_type_get(self, context, id, inactive=False,
expected_fields=None):
pass
def qos_specs_get(self, context, qos_specs_id, inactive=False):
pass
@classmethod
def image_volume_cache_get_by_volume_id(cls, context, volume_id):
pass
def get_by_id(self, context, model, id, *args, **kwargs):
pass
| 9 | 1 | 6 | 0 | 5 | 0 | 2 | 0.24 | 1 | 1 | 1 | 0 | 6 | 1 | 7 | 7 | 64 | 13 | 41 | 17 | 31 | 10 | 32 | 15 | 24 | 3 | 1 | 1 | 12 |
3,656 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/nos_brick.py
|
cinderlib.nos_brick.RBDConnector
|
class RBDConnector(connectors.rbd.RBDConnector):
""""Connector class to attach/detach RBD volumes locally.
OS-Brick's implementation covers only 2 cases:
- Local attachment on controller node.
- Returning a file object on non controller nodes.
We need a third one, local attachment on non controller node.
"""
def connect_volume(self, connection_properties):
# NOTE(e0ne): sanity check if ceph-common is installed.
self._setup_rbd_class()
# Extract connection parameters and generate config file
try:
user = connection_properties['auth_username']
pool, volume = connection_properties['name'].split('/')
cluster_name = connection_properties.get('cluster_name')
monitor_ips = connection_properties.get('hosts')
monitor_ports = connection_properties.get('ports')
keyring = connection_properties.get('keyring')
except IndexError:
msg = 'Malformed connection properties'
raise exception.BrickException(msg)
conf = self._create_ceph_conf(monitor_ips, monitor_ports,
str(cluster_name), user,
keyring)
link_name = self.get_rbd_device_name(pool, volume)
real_path = os.path.realpath(link_name)
try:
# Map RBD volume if it's not already mapped
if not os.path.islink(link_name) or not os.path.exists(real_path):
cmd = ['rbd', 'map', volume, '--pool', pool, '--conf', conf]
cmd += self._get_rbd_args(connection_properties)
stdout, stderr = self._execute(*cmd,
root_helper=self._root_helper,
run_as_root=True)
real_path = stdout.strip()
# The host may not have RBD installed, and therefore won't
# create the symlinks, ensure they exist
if self.containerized:
self._ensure_link(real_path, link_name)
except Exception:
fileutils.delete_if_exists(conf)
raise
return {'path': real_path,
'conf': conf,
'type': 'block'}
def _ensure_link(self, source, link_name):
self._ensure_dir(os.path.dirname(link_name))
if self.im_root:
try:
os.symlink(source, link_name)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
# If we have a leftover link, clean it up
if source != os.path.realpath(link_name):
os.remove(link_name)
os.symlink(source, link_name)
else:
self._execute('ln', '-s', '-f', source, link_name,
run_as_root=True)
def check_valid_device(self, path, run_as_root=True):
"""Verify an existing RBD handle is connected and valid."""
if self.im_root:
try:
with open(path, 'r') as f:
f.read(4096)
except Exception:
return False
return True
try:
self._execute('dd', 'if=' + path, 'of=/dev/null', 'bs=4096',
'count=1', root_helper=self._root_helper,
run_as_root=True)
except putils.ProcessExecutionError:
return False
return True
def disconnect_volume(self, connection_properties, device_info,
force=False, ignore_errors=False):
self._setup_rbd_class()
pool, volume = connection_properties['name'].split('/')
conf_file = device_info['conf']
link_name = self.get_rbd_device_name(pool, volume)
real_dev_path = os.path.realpath(link_name)
if os.path.exists(real_dev_path):
cmd = ['rbd', 'unmap', real_dev_path, '--conf', conf_file]
cmd += self._get_rbd_args(connection_properties)
self._execute(*cmd, root_helper=self._root_helper,
run_as_root=True)
if self.containerized:
unlink_root(link_name)
fileutils.delete_if_exists(conf_file)
def _ensure_dir(self, path):
if self.im_root:
try:
os.makedirs(path, 0o755)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
else:
self._execute('mkdir', '-p', '-m0755', path, run_as_root=True)
def _setup_class(self):
try:
self._execute('which', 'rbd')
except putils.ProcessExecutionError:
msg = 'ceph-common package not installed'
raise exception.BrickException(msg)
RBDConnector.im_root = os.getuid() == 0
# Check if we are running containerized
RBDConnector.containerized = os.stat('/proc').st_dev > 4
# Don't check again to speed things on following connections
RBDConnector._setup_rbd_class = lambda *args: None
_setup_rbd_class = _setup_class
|
class RBDConnector(connectors.rbd.RBDConnector):
'''"Connector class to attach/detach RBD volumes locally.
OS-Brick's implementation covers only 2 cases:
- Local attachment on controller node.
- Returning a file object on non controller nodes.
We need a third one, local attachment on non controller node.
'''
def connect_volume(self, connection_properties):
pass
def _ensure_link(self, source, link_name):
pass
def check_valid_device(self, path, run_as_root=True):
'''Verify an existing RBD handle is connected and valid.'''
pass
def disconnect_volume(self, connection_properties, device_info,
force=False, ignore_errors=False):
pass
def _ensure_dir(self, path):
pass
def _setup_class(self):
pass
| 7 | 2 | 19 | 2 | 16 | 2 | 4 | 0.15 | 1 | 4 | 0 | 0 | 6 | 1 | 6 | 6 | 131 | 19 | 97 | 31 | 89 | 15 | 84 | 26 | 77 | 5 | 1 | 3 | 23 |
3,657 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/objects.py
|
cinderlib.objects.Connection
|
class Connection(Object, LazyVolumeAttr):
"""Cinderlib Connection info that maps to VolumeAttachment.
On Pike we don't have the connector field on the VolumeAttachment ORM
instance so we use the connection_info to store everything.
We'll have a dictionary:
{'conn': connection info
'connector': connector dictionary
'device': result of connect_volume}
"""
OVO_CLASS = cinder_objs.VolumeAttachment
SIMPLE_JSON_IGNORE = ('volume',)
@classmethod
def connect(cls, volume, connector, **kwargs):
conn_info = volume.backend.driver.initialize_connection(
volume._ovo, connector)
conn = cls(volume.backend,
connector=connector,
volume=volume,
status='attached',
connection_info={'conn': conn_info},
**kwargs)
conn.connector_info = connector
conn.save()
return conn
@staticmethod
def _is_multipathed_conn(kwargs):
# Priority:
# - kwargs['use_multipath']
# - Multipath in connector_dict in kwargs or _ovo
# - Detect from connection_info data from OVO in kwargs
if 'use_multipath' in kwargs:
return kwargs['use_multipath']
connector = kwargs.get('connector') or {}
conn_info = kwargs.get('connection_info') or {}
if '__ovo' in kwargs:
ovo = kwargs['__ovo']
conn_info = conn_info or ovo.connection_info or {}
connector = connector or ovo.connection_info.get('connector') or {}
if 'multipath' in connector:
return connector['multipath']
# If multipathed not defined autodetect based on connection info
conn_info = conn_info['conn'].get('data', {})
iscsi_mp = 'target_iqns' in conn_info and 'target_portals' in conn_info
fc_mp = not isinstance(conn_info.get('target_wwn', ''),
six.string_types)
return iscsi_mp or fc_mp
def __init__(self, *args, **kwargs):
self.use_multipath = self._is_multipathed_conn(kwargs)
scan_attempts = brick_initiator.DEVICE_SCAN_ATTEMPTS_DEFAULT
self.scan_attempts = kwargs.pop('device_scan_attempts', scan_attempts)
volume = kwargs.pop('volume', None)
self._connector = None
if not kwargs.get('attach_mode'):
kwargs['attach_mode'] = 'rw'
super(Connection, self).__init__(*args, **kwargs)
LazyVolumeAttr.__init__(self, volume)
@property
def conn_info(self):
conn_info = self._ovo.connection_info
if conn_info:
conn = conn_info.get('conn')
data = conn.get('data')
if data is not None:
data['access_mode'] = self._ovo.attach_mode
return conn
return {}
@conn_info.setter
def conn_info(self, value):
if not value:
self._ovo.connection_info = None
return
if self._ovo.connection_info is None:
self._ovo.connection_info = {}
self._ovo.connection_info['conn'] = value
@property
def protocol(self):
return self.conn_info.get('driver_volume_type')
@property
def connector_info(self):
if self.connection_info:
return self.connection_info.get('connector')
return None
@connector_info.setter
def connector_info(self, value):
if self._ovo.connection_info is None:
self._ovo.connection_info = {}
self.connection_info['connector'] = value
# Since we are changing the dictionary the OVO won't detect the change
self._changed_fields.add('connection_info')
@property
def device(self):
if self.connection_info:
return self.connection_info.get('device')
return None
@device.setter
def device(self, value):
if value:
self.connection_info['device'] = value
else:
self.connection_info.pop('device', None)
# Since we are changing the dictionary the OVO won't detect the change
self._changed_fields.add('connection_info')
@property
def path(self):
device = self.device
if not device:
return None
return device['path']
@property
def connector(self):
if not self._connector:
if not self.conn_info:
return None
self._connector = brick_connector.InitiatorConnector.factory(
self.protocol, self.backend_class.root_helper,
use_multipath=self.use_multipath,
device_scan_attempts=self.scan_attempts,
# NOTE(geguileo): afaik only remotefs uses the connection info
conn=self.conn_info,
do_local_attach=True)
return self._connector
@property
def attached(self):
return bool(self.device)
@property
def connected(self):
return bool(self.conn_info)
@classmethod
def _load(cls, backend, ovo, volume=None, save=False):
# We let the __init__ method set the _volume if exists
conn = cls(backend, __ovo=ovo, volume=volume)
if save:
conn.save()
# Restore circular reference only if we have all the elements
if conn._volume:
utils.add_by_id(conn, conn._volume._connections)
connections = getattr(conn._volume._ovo,
CONNECTIONS_OVO_FIELD).objects
utils.add_by_id(conn._ovo, connections)
return conn
def _disconnect(self, force=False):
self.backend.driver.terminate_connection(self.volume._ovo,
self.connector_info,
force=force)
self.conn_info = None
self._ovo.status = 'detached'
self.persistence.delete_connection(self)
def disconnect(self, force=False):
self._disconnect(force)
self.volume._disconnect(self)
def device_attached(self, device):
self.device = device
self.save()
def attach(self):
device = self.connector.connect_volume(self.conn_info['data'])
self.device_attached(device)
try:
if self.connector.check_valid_device(self.path):
error_msg = None
else:
error_msg = ('Unable to access the backend storage via path '
'%s.' % self.path)
except Exception:
error_msg = ('Could not validate device %s. There may be missing '
'packages on your host.' % self.path)
LOG.exception(error_msg)
if error_msg:
self.detach(force=True, ignore_errors=True)
raise cinder_exception.DeviceUnavailable(
path=self.path, attach_info=self._ovo.connection_info,
reason=error_msg)
if self._volume:
self.volume.local_attach = self
def detach(self, force=False, ignore_errors=False, exc=None):
if not exc:
exc = brick_exception.ExceptionChainer()
with exc.context(force, 'Disconnect failed'):
self.connector.disconnect_volume(self.conn_info['data'],
self.device,
force=force,
ignore_errors=ignore_errors)
if not exc or ignore_errors:
if self._volume:
self.volume.local_attach = None
self.device = None
self.save()
self._connector = None
if exc and not ignore_errors:
raise exc
@classmethod
def get_by_id(cls, connection_id):
result = cls.persistence.get_connections(connection_id=connection_id)
if not result:
msg = 'id=%s' % connection_id
raise exception.ConnectionNotFound(filter=msg)
return result[0]
@property
def backend(self):
if self._backend is None and hasattr(self, '_volume'):
self._backend = self.volume.backend
return self._backend
@backend.setter
def backend(self, value):
self._backend = value
def save(self):
self.persistence.set_connection(self)
|
class Connection(Object, LazyVolumeAttr):
'''Cinderlib Connection info that maps to VolumeAttachment.
On Pike we don't have the connector field on the VolumeAttachment ORM
instance so we use the connection_info to store everything.
We'll have a dictionary:
{'conn': connection info
'connector': connector dictionary
'device': result of connect_volume}
'''
@classmethod
def connect(cls, volume, connector, **kwargs):
pass
@staticmethod
def _is_multipathed_conn(kwargs):
pass
def __init__(self, *args, **kwargs):
pass
@property
def conn_info(self):
pass
@conn_info.setter
def conn_info(self):
pass
@property
def protocol(self):
pass
@property
def connector_info(self):
pass
@connector_info.setter
def connector_info(self):
pass
@property
def device(self):
pass
@device.setter
def device(self):
pass
@property
def path(self):
pass
@property
def connector_info(self):
pass
@property
def attached(self):
pass
@property
def connected(self):
pass
@classmethod
def _load(cls, backend, ovo, volume=None, save=False):
pass
def _disconnect(self, force=False):
pass
def disconnect(self, force=False):
pass
def device_attached(self, device):
pass
def attached(self):
pass
def detach(self, force=False, ignore_errors=False, exc=None):
pass
@classmethod
def get_by_id(cls, connection_id):
pass
@property
def backend(self):
pass
@backend.setter
def backend(self):
pass
def save(self):
pass
| 42 | 1 | 8 | 1 | 7 | 0 | 2 | 0.1 | 2 | 3 | 0 | 0 | 20 | 4 | 24 | 46 | 244 | 38 | 188 | 67 | 146 | 18 | 147 | 50 | 122 | 5 | 2 | 2 | 51 |
3,658 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/objects.py
|
cinderlib.objects.KeyValue
|
class KeyValue(object):
def __init__(self, key=None, value=None):
self.key = key
self.value = value
def __eq__(self, other):
return (self.key, self.value) == (other.key, other.value)
|
class KeyValue(object):
def __init__(self, key=None, value=None):
pass
def __eq__(self, other):
pass
| 3 | 0 | 3 | 0 | 3 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 2 | 2 | 2 | 2 | 7 | 1 | 6 | 5 | 3 | 0 | 6 | 5 | 3 | 1 | 1 | 0 | 2 |
3,659 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/objects.py
|
cinderlib.objects.LazyVolumeAttr
|
class LazyVolumeAttr(object):
LAZY_PROPERTIES = ('volume',)
_volume = None
def __init__(self, volume):
if volume:
self._volume = volume
# Ensure circular reference is set
self._ovo.volume = volume._ovo
self._ovo.volume_id = volume._ovo.id
elif self._ovo.obj_attr_is_set('volume'):
self._volume = Volume._load(self.backend, self._ovo.volume)
@property
def volume(self):
# Lazy loading
if self._volume is None:
self._volume = Volume.get_by_id(self.volume_id)
self._ovo.volume = self._volume._ovo
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
self._ovo.volume = value._ovo
def refresh(self):
last_self = self.get_by_id(self.id)
if self._volume is not None:
last_self.volume
vars(self).clear()
vars(self).update(vars(last_self))
|
class LazyVolumeAttr(object):
def __init__(self, volume):
pass
@property
def volume(self):
pass
@volume.setter
def volume(self):
pass
def refresh(self):
pass
| 7 | 0 | 6 | 0 | 5 | 1 | 2 | 0.08 | 1 | 1 | 1 | 2 | 4 | 0 | 4 | 4 | 32 | 4 | 26 | 10 | 19 | 2 | 23 | 8 | 18 | 3 | 1 | 1 | 8 |
3,660 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/objects.py
|
cinderlib.objects.NamedObject
|
class NamedObject(Object):
def __init__(self, backend, **fields_data):
if 'description' in fields_data:
fields_data['display_description'] = fields_data.pop('description')
if 'name' in fields_data:
fields_data['display_name'] = fields_data.pop('name')
super(NamedObject, self).__init__(backend, **fields_data)
@property
def name(self):
return self._ovo.display_name
@property
def description(self):
return self._ovo.display_description
@property
def name_in_storage(self):
return self._ovo.name
|
class NamedObject(Object):
def __init__(self, backend, **fields_data):
pass
@property
def name(self):
pass
@property
def description(self):
pass
@property
def name_in_storage(self):
pass
| 8 | 0 | 3 | 0 | 3 | 0 | 2 | 0 | 1 | 1 | 0 | 2 | 4 | 0 | 4 | 22 | 19 | 3 | 16 | 8 | 8 | 0 | 13 | 5 | 8 | 3 | 2 | 1 | 6 |
3,661 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/objects.py
|
cinderlib.objects.Volume
|
class Volume(NamedObject):
OVO_CLASS = cinder_objs.Volume
SIMPLE_JSON_IGNORE = ('snapshots', 'volume_attachment')
DEFAULT_FIELDS_VALUES = {
'size': 1,
'user_id': Object.CONTEXT.user_id,
'project_id': Object.CONTEXT.project_id,
'status': 'creating',
'attach_status': 'detached',
'metadata': {},
'admin_metadata': {},
'glance_metadata': {},
}
LAZY_PROPERTIES = ('snapshots', 'connections')
_ignore_keys = ('id', CONNECTIONS_OVO_FIELD, 'snapshots', 'volume_type')
def __init__(self, backend_or_vol, pool_name=None, **kwargs):
# Accept backend name for convenience
if isinstance(backend_or_vol, six.string_types):
backend_name = backend_or_vol
backend_or_vol = self._get_backend(backend_or_vol)
elif isinstance(backend_or_vol, self.backend_class):
backend_name = backend_or_vol.id
elif isinstance(backend_or_vol, Volume):
backend_str, pool = backend_or_vol._ovo.host.split('#')
backend_name = backend_str.split('@')[-1]
pool_name = pool_name or pool
for key in backend_or_vol._ovo.fields:
if (backend_or_vol._ovo.obj_attr_is_set(key) and
key not in self._ignore_keys):
kwargs.setdefault(key, getattr(backend_or_vol._ovo, key))
if backend_or_vol.volume_type:
kwargs.setdefault('extra_specs',
backend_or_vol.volume_type.extra_specs)
if backend_or_vol.volume_type.qos_specs:
kwargs.setdefault(
'qos_specs',
backend_or_vol.volume_type.qos_specs.specs)
backend_or_vol = backend_or_vol.backend
if '__ovo' not in kwargs:
kwargs[CONNECTIONS_OVO_FIELD] = (
cinder_objs.VolumeAttachmentList(context=self.CONTEXT))
kwargs['snapshots'] = (
cinder_objs.SnapshotList(context=self.CONTEXT))
self._snapshots = []
self._connections = []
qos_specs = kwargs.pop('qos_specs', None)
extra_specs = kwargs.pop('extra_specs', {})
super(Volume, self).__init__(backend_or_vol, **kwargs)
self._populate_data()
self.local_attach = None
# If we overwrote the host, then we ignore pool_name and don't set a
# default value or copy the one from the source either.
if 'host' not in kwargs and '__ovo' not in kwargs:
# TODO(geguileo): Add pool support
pool_name = pool_name or backend_or_vol.pool_names[0]
self._ovo.host = ('%s@%s#%s' %
(cfg.CONF.host, backend_name, pool_name))
if qos_specs or extra_specs:
if qos_specs:
qos_specs = cinder_objs.QualityOfServiceSpecs(
id=self.id, name=self.id,
consumer='back-end', specs=qos_specs)
qos_specs_id = self.id
else:
qos_specs = qos_specs_id = None
self._ovo.volume_type = cinder_objs.VolumeType(
context=self.CONTEXT,
is_public=True,
id=self.id,
name=self.id,
qos_specs_id=qos_specs_id,
extra_specs=extra_specs,
qos_specs=qos_specs)
self._ovo.volume_type_id = self.id
@property
def snapshots(self):
# Lazy loading
if self._snapshots is None:
self._snapshots = self.persistence.get_snapshots(volume_id=self.id)
for snap in self._snapshots:
snap.volume = self
ovos = [snap._ovo for snap in self._snapshots]
self._ovo.snapshots = cinder_objs.SnapshotList(objects=ovos)
self._ovo.obj_reset_changes(('snapshots',))
return self._snapshots
@property
def connections(self):
# Lazy loading
if self._connections is None:
self._connections = self.persistence.get_connections(
volume_id=self.id)
for conn in self._connections:
conn.volume = self
ovos = [conn._ovo for conn in self._connections]
setattr(self._ovo, CONNECTIONS_OVO_FIELD,
cinder_objs.VolumeAttachmentList(objects=ovos))
self._ovo.obj_reset_changes((CONNECTIONS_OVO_FIELD,))
return self._connections
@classmethod
def get_by_id(cls, volume_id):
result = cls.persistence.get_volumes(volume_id=volume_id)
if not result:
raise exception.VolumeNotFound(volume_id=volume_id)
return result[0]
@classmethod
def get_by_name(cls, volume_name):
return cls.persistence.get_volumes(volume_name=volume_name)
def _populate_data(self):
if self._ovo.obj_attr_is_set('snapshots'):
self._snapshots = []
for snap_ovo in self._ovo.snapshots:
# Set circular reference
snap_ovo.volume = self._ovo
Snapshot._load(self.backend, snap_ovo, self)
else:
self._snapshots = None
if self._ovo.obj_attr_is_set(CONNECTIONS_OVO_FIELD):
self._connections = []
for conn_ovo in getattr(self._ovo, CONNECTIONS_OVO_FIELD):
# Set circular reference
conn_ovo.volume = self._ovo
Connection._load(self.backend, conn_ovo, self)
else:
self._connections = None
@classmethod
def _load(cls, backend, ovo, save=None):
vol = cls(backend, __ovo=ovo)
if save:
vol.save()
if vol._snapshots:
for s in vol._snapshots:
s.obj_reset_changes()
s.save()
if vol._connections:
for c in vol._connections:
c.obj_reset_changes()
c.save()
return vol
def create(self):
self.backend._start_creating_volume(self)
try:
model_update = self.backend.driver.create_volume(self._ovo)
self._ovo.status = 'available'
if model_update:
self._ovo.update(model_update)
self.backend._volume_created(self)
except Exception:
self._ovo.status = 'error'
self._raise_with_resource()
finally:
self.save()
def _snapshot_removed(self, snapshot):
# The snapshot instance in memory could be out of sync and not be
# identical, so check by ID.
i, snap = utils.find_by_id(snapshot.id, self._snapshots)
if snap:
del self._snapshots[i]
i, ovo = utils.find_by_id(snapshot.id, self._ovo.snapshots.objects)
if ovo:
del self._ovo.snapshots.objects[i]
def _connection_removed(self, connection):
# The connection instance in memory could be out of sync and not be
# identical, so check by ID.
i, conn = utils.find_by_id(connection.id, self._connections)
if conn:
del self._connections[i]
ovo_conns = getattr(self._ovo, CONNECTIONS_OVO_FIELD).objects
i, ovo_conn = utils.find_by_id(connection.id, ovo_conns)
if ovo_conn:
del ovo_conns[i]
def delete(self):
if self.snapshots:
msg = 'Cannot delete volume %s with snapshots' % self.id
raise exception.InvalidVolume(reason=msg)
try:
self.backend.driver.delete_volume(self._ovo)
self.persistence.delete_volume(self)
self.backend._volume_removed(self)
self._ovo.status = 'deleted'
except Exception:
self._ovo.status = 'error_deleting'
self.save()
self._raise_with_resource()
def extend(self, size):
volume = self._ovo
volume.previous_status = volume.status
volume.status = 'extending'
try:
self.backend.driver.extend_volume(volume, size)
volume.size = size
volume.status = volume.previous_status
volume.previous_status = None
except Exception:
volume.status = 'error'
self._raise_with_resource()
finally:
self.save()
def clone(self, **new_vol_attrs):
new_vol_attrs['source_vol_id'] = self.id
new_vol = Volume(self, **new_vol_attrs)
self.backend._start_creating_volume(new_vol)
try:
model_update = self.backend.driver.create_cloned_volume(
new_vol._ovo, self._ovo)
new_vol.status = 'available'
if model_update:
new_vol.update(model_update)
self.backend._volume_created(new_vol)
except Exception:
new_vol.status = 'error'
new_vol._raise_with_resource()
finally:
new_vol.save()
return new_vol
def create_snapshot(self, name='', description='', **kwargs):
snap = Snapshot(self, name=name, description=description, **kwargs)
try:
snap.create()
finally:
if self._snapshots is not None:
self._snapshots.append(snap)
self._ovo.snapshots.objects.append(snap._ovo)
return snap
def attach(self):
connector_dict = brick_connector.get_connector_properties(
self.backend_class.root_helper,
cfg.CONF.my_ip,
self.backend.configuration.use_multipath_for_image_xfer,
self.backend.configuration.enforce_multipath_for_image_xfer)
conn = self.connect(connector_dict)
try:
conn.attach()
except Exception:
self.disconnect(conn)
raise
return conn
def detach(self, force=False, ignore_errors=False):
if not self.local_attach:
raise exception.NotLocal(self.id)
exc = brick_exception.ExceptionChainer()
conn = self.local_attach
try:
conn.detach(force, ignore_errors, exc)
except Exception:
if not force:
raise
with exc.context(force, 'Unable to disconnect'):
conn.disconnect(force)
if exc and not ignore_errors:
raise exc
def connect(self, connector_dict, **ovo_fields):
model_update = self.backend.driver.create_export(self.CONTEXT,
self._ovo,
connector_dict)
if model_update:
self._ovo.update(model_update)
self.save()
try:
conn = Connection.connect(self, connector_dict, **ovo_fields)
if self._connections is not None:
self._connections.append(conn)
ovo_conns = getattr(self._ovo, CONNECTIONS_OVO_FIELD).objects
ovo_conns.append(conn._ovo)
self._ovo.status = 'in-use'
self.save()
except Exception:
self._remove_export()
self._raise_with_resource()
return conn
def _disconnect(self, connection):
self._remove_export()
self._connection_removed(connection)
if not self.connections:
self._ovo.status = 'available'
self.save()
def disconnect(self, connection, force=False):
connection._disconnect(force)
self._disconnect(connection)
def cleanup(self):
for attach in self.connections:
attach.detach()
self._remove_export()
def _remove_export(self):
self.backend.driver.remove_export(self._context, self._ovo)
def refresh(self):
last_self = self.get_by_id(self.id)
if self._snapshots is not None:
last_self.snapshots
if self._connections is not None:
last_self.connections
vars(self).clear()
vars(self).update(vars(last_self))
def save(self):
self.persistence.set_volume(self)
|
class Volume(NamedObject):
def __init__(self, backend_or_vol, pool_name=None, **kwargs):
pass
@property
def snapshots(self):
pass
@property
def connections(self):
pass
@classmethod
def get_by_id(cls, volume_id):
pass
@classmethod
def get_by_name(cls, volume_name):
pass
def _populate_data(self):
pass
@classmethod
def _load(cls, backend, ovo, save=None):
pass
def create(self):
pass
def _snapshot_removed(self, snapshot):
pass
def _connection_removed(self, connection):
pass
def delete(self):
pass
def extend(self, size):
pass
def clone(self, **new_vol_attrs):
pass
def create_snapshot(self, name='', description='', **kwargs):
pass
def attach(self):
pass
def detach(self, force=False, ignore_errors=False):
pass
def connections(self):
pass
def _disconnect(self, connection):
pass
def disconnect(self, connection, force=False):
pass
def cleanup(self):
pass
def _remove_export(self):
pass
def refresh(self):
pass
def save(self):
pass
| 29 | 0 | 13 | 1 | 11 | 1 | 3 | 0.05 | 1 | 4 | 2 | 0 | 20 | 6 | 23 | 45 | 334 | 40 | 282 | 76 | 253 | 14 | 234 | 68 | 210 | 12 | 3 | 3 | 72 |
3,662 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/objects.py
|
cinderlib.objects.Snapshot
|
class Snapshot(NamedObject, LazyVolumeAttr):
OVO_CLASS = cinder_objs.Snapshot
SIMPLE_JSON_IGNORE = ('volume',)
DEFAULT_FIELDS_VALUES = {
'status': 'creating',
'metadata': {},
}
def __init__(self, volume, **kwargs):
param_backend = self._get_backend(kwargs.pop('backend', None))
if '__ovo' in kwargs:
backend = kwargs['__ovo'][BACKEND_NAME_SNAPSHOT_FIELD]
else:
kwargs.setdefault('user_id', volume.user_id)
kwargs.setdefault('project_id', volume.project_id)
kwargs['volume_id'] = volume.id
kwargs['volume_size'] = volume.size
kwargs['volume_type_id'] = volume.volume_type_id
kwargs['volume'] = volume._ovo
if volume:
backend = volume.backend.id
kwargs[BACKEND_NAME_SNAPSHOT_FIELD] = backend
else:
backend = param_backend and param_backend.id
if not (backend or param_backend):
raise ValueError('Backend not provided')
if backend and param_backend and param_backend.id != backend:
raise ValueError("Multiple backends provided and they don't match")
super(Snapshot, self).__init__(backend=param_backend or backend,
**kwargs)
LazyVolumeAttr.__init__(self, volume)
@classmethod
def _load(cls, backend, ovo, volume=None, save=False):
# We let the __init__ method set the _volume if exists
snap = cls(volume, backend=backend, __ovo=ovo)
if save:
snap.save()
# Restore circular reference only if we have all the elements
if snap._volume:
utils.add_by_id(snap, snap._volume._snapshots)
utils.add_by_id(snap._ovo, snap._volume._ovo.snapshots.objects)
return snap
def create(self):
try:
model_update = self.backend.driver.create_snapshot(self._ovo)
self._ovo.status = 'available'
if model_update:
self._ovo.update(model_update)
except Exception:
self._ovo.status = 'error'
self._raise_with_resource()
finally:
self.save()
def delete(self):
try:
self.backend.driver.delete_snapshot(self._ovo)
self.persistence.delete_snapshot(self)
self._ovo.status = 'deleted'
except Exception:
self._ovo.status = 'error_deleting'
self.save()
self._raise_with_resource()
if self._volume is not None:
self._volume._snapshot_removed(self)
def create_volume(self, **new_vol_params):
new_vol_params.setdefault('size', self.volume_size)
new_vol_params['snapshot_id'] = self.id
new_vol = Volume(self.volume, **new_vol_params)
self.backend._start_creating_volume(new_vol)
try:
model_update = self.backend.driver.create_volume_from_snapshot(
new_vol._ovo, self._ovo)
new_vol._ovo.status = 'available'
if model_update:
new_vol._ovo.update(model_update)
self.backend._volume_created(new_vol)
except Exception:
new_vol._ovo.status = 'error'
new_vol._raise_with_resource()
finally:
new_vol.save()
return new_vol
@classmethod
def get_by_id(cls, snapshot_id):
result = cls.persistence.get_snapshots(snapshot_id=snapshot_id)
if not result:
raise exception.SnapshotNotFound(snapshot_id=snapshot_id)
return result[0]
@classmethod
def get_by_name(cls, snapshot_name):
return cls.persistence.get_snapshots(snapshot_name=snapshot_name)
def save(self):
self.persistence.set_snapshot(self)
|
class Snapshot(NamedObject, LazyVolumeAttr):
def __init__(self, volume, **kwargs):
pass
@classmethod
def _load(cls, backend, ovo, volume=None, save=False):
pass
def create(self):
pass
def delete(self):
pass
def create_volume(self, **new_vol_params):
pass
@classmethod
def get_by_id(cls, snapshot_id):
pass
@classmethod
def get_by_name(cls, snapshot_name):
pass
def save(self):
pass
| 12 | 0 | 11 | 1 | 10 | 0 | 3 | 0.02 | 2 | 4 | 1 | 0 | 5 | 0 | 8 | 34 | 105 | 13 | 90 | 22 | 78 | 2 | 78 | 19 | 69 | 5 | 3 | 2 | 21 |
3,663 |
Akrog/cinderlib
|
Akrog_cinderlib/cinderlib/objects.py
|
cinderlib.objects.Object
|
class Object(object):
"""Base class for our resource representation objects."""
SIMPLE_JSON_IGNORE = tuple()
DEFAULT_FIELDS_VALUES = {}
LAZY_PROPERTIES = tuple()
backend_class = None
CONTEXT = context.RequestContext(user_id=DEFAULT_USER_ID,
project_id=DEFAULT_PROJECT_ID,
is_admin=True,
overwrite=False)
def _get_backend(self, backend_name_or_obj):
if isinstance(backend_name_or_obj, six.string_types):
try:
return self.backend_class.backends[backend_name_or_obj]
except KeyError:
if self.backend_class.fail_on_missing_backend:
raise
return backend_name_or_obj
def __init__(self, backend, **fields_data):
self.backend = self._get_backend(backend)
__ovo = fields_data.get('__ovo')
if __ovo:
self._ovo = __ovo
else:
self._ovo = self._create_ovo(**fields_data)
# Store a reference to the cinderlib obj in the OVO for serialization
self._ovo._cl_obj_ = self
@classmethod
def setup(cls, persistence_driver, backend_class, project_id, user_id,
non_uuid_ids):
cls.persistence = persistence_driver
cls.backend_class = backend_class
# Set the global context if we aren't using the default
project_id = project_id or DEFAULT_PROJECT_ID
user_id = user_id or DEFAULT_USER_ID
if (project_id != cls.CONTEXT.project_id or
user_id != cls.CONTEXT.user_id):
cls.CONTEXT.user_id = user_id
cls.CONTEXT.project_id = project_id
Volume.DEFAULT_FIELDS_VALUES['user_id'] = user_id
Volume.DEFAULT_FIELDS_VALUES['project_id'] = project_id
# Configure OVOs to support non_uuid_ids
if non_uuid_ids:
for ovo_name in cinder_base_ovo.CinderObjectRegistry.obj_classes():
ovo_cls = getattr(cinder_objs, ovo_name)
if 'id' in ovo_cls.fields:
ovo_cls.fields['id'] = cinder_base_ovo.fields.StringField()
def _to_primitive(self):
"""Return custom cinderlib data for serialization."""
return None
def _create_ovo(self, **fields_data):
# The base are the default values we define on our own classes
fields_values = self.DEFAULT_FIELDS_VALUES.copy()
# Apply the values defined by the caller
fields_values.update(fields_data)
# We support manually setting the id, so set only if not already set
# or if set to None
if not fields_values.get('id'):
fields_values['id'] = self.new_uuid()
# Set non set field values based on OVO's default value and on whether
# it is nullable or not.
for field_name, field in self.OVO_CLASS.fields.items():
if field.default != cinder_base_ovo.fields.UnspecifiedDefault:
fields_values.setdefault(field_name, field.default)
elif field.nullable:
fields_values.setdefault(field_name, None)
if ('created_at' in self.OVO_CLASS.fields and
not fields_values.get('created_at')):
fields_values['created_at'] = timeutils.utcnow()
return self.OVO_CLASS(context=self.CONTEXT, **fields_values)
@property
def json(self):
return self.to_json(simplified=False)
def to_json(self, simplified=True):
visited = set()
if simplified:
for field in self.SIMPLE_JSON_IGNORE:
if self._ovo.obj_attr_is_set(field):
visited.add(id(getattr(self._ovo, field)))
ovo = self._ovo.obj_to_primitive(visited=visited)
return {'class': type(self).__name__,
# If no driver loaded, just return the name of the backend
'backend': getattr(self.backend, 'config',
{'volume_backend_name': self.backend}),
'ovo': ovo}
@property
def jsons(self):
return self.to_jsons(simplified=False)
def to_jsons(self, simplified=True):
json_data = self.to_json(simplified)
return json_lib.dumps(json_data, separators=(',', ':'))
def _only_ovo_data(self, ovo):
if isinstance(ovo, dict):
if 'versioned_object.data' in ovo:
value = ovo['versioned_object.data']
if ['objects'] == value.keys():
return self._only_ovo_data(value['objects'])
key = ovo['versioned_object.name'].lower()
return {key: self._only_ovo_data(value)}
for key in ovo.keys():
ovo[key] = self._only_ovo_data(ovo[key])
if isinstance(ovo, list) and ovo:
return [self._only_ovo_data(e) for e in ovo]
return ovo
def to_dict(self):
json_ovo = self.json
return self._only_ovo_data(json_ovo['ovo'])
@property
def dump(self):
# Make sure we load lazy loading properties
for lazy_property in self.LAZY_PROPERTIES:
getattr(self, lazy_property)
return self.json
@property
def dumps(self):
return json_lib.dumps(self.dump, separators=(',', ':'))
def __repr__(self):
backend = self.backend
if isinstance(self.backend, self.backend_class):
backend = backend.id
return ('<cinderlib.%s object %s on backend %s>' %
(type(self).__name__, self.id, backend))
@classmethod
def load(cls, json_src, save=False):
backend = cls.backend_class.load_backend(json_src['backend'])
ovo = cinder_base_ovo.CinderObject.obj_from_primitive(json_src['ovo'],
cls.CONTEXT)
return cls._load(backend, ovo, save=save)
@staticmethod
def new_uuid():
return str(uuid.uuid4())
def __getattr__(self, name):
if name == '_ovo':
raise AttributeError('Attribute _ovo is not yet set')
return getattr(self._ovo, name)
def _raise_with_resource(self):
exc_info = sys.exc_info()
exc_info[1].resource = self
six.reraise(*exc_info)
|
class Object(object):
'''Base class for our resource representation objects.'''
def _get_backend(self, backend_name_or_obj):
pass
def __init__(self, backend, **fields_data):
pass
@classmethod
def setup(cls, persistence_driver, backend_class, project_id, user_id,
non_uuid_ids):
pass
def _to_primitive(self):
'''Return custom cinderlib data for serialization.'''
pass
def _create_ovo(self, **fields_data):
pass
@property
def json(self):
pass
def to_json(self, simplified=True):
pass
@property
def jsons(self):
pass
def to_jsons(self, simplified=True):
pass
def _only_ovo_data(self, ovo):
pass
def to_dict(self):
pass
@property
def dump(self):
pass
@property
def dumps(self):
pass
def __repr__(self):
pass
@classmethod
def load(cls, json_src, save=False):
pass
@staticmethod
def new_uuid():
pass
def __getattr__(self, name):
pass
def _raise_with_resource(self):
pass
| 26 | 2 | 7 | 1 | 6 | 1 | 2 | 0.1 | 1 | 8 | 1 | 2 | 15 | 2 | 18 | 18 | 167 | 28 | 126 | 51 | 99 | 13 | 106 | 43 | 87 | 6 | 1 | 3 | 42 |
3,664 |
AlecAivazis/graphql-over-kafka
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/AlecAivazis_graphql-over-kafka/example/api.py
|
api.RecipeBookAPIGateway
|
class RecipeBookAPIGateway(nautilus.APIGateway):
@nautilus.auth_criteria('Ingredient')
def auth_ingredient(self, model, user):
# an ingredient can only be viewed by author
return model.author == user
|
class RecipeBookAPIGateway(nautilus.APIGateway):
@nautilus.auth_criteria('Ingredient')
def auth_ingredient(self, model, user):
pass
| 3 | 0 | 3 | 0 | 2 | 1 | 1 | 0.25 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 6 | 1 | 4 | 3 | 1 | 1 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
3,665 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/services/service.py
|
nautilus.services.service.Service
|
class Service(metaclass=ServiceMetaClass):
"""
This is the base class for all services that are part of a nautilus
cloud. This class provides basic functionalities such as registration,
responding to actions, and predictable api endpoints.
Args:
action_handler (optional, function): The callback function fired when
an action is recieved. If None, the service does not connect to the
action queue.
config (optional, class): A python class to use for configuring the
service.
name (string): The name of the service. This will be used to
register the service with the registry as act as the designator
for a ServiceObjectType.
schema (optional, graphql.core.type.GraphQLSchema): The GraphQL schema
which acts as a basis for the external API. If None, no endpoints are
added to the service.
Example:
.. code-block:: python
import nautilus
from nautilus.api.util import create_model_schema
from nautilus.network import crud_handler
import nautilus.models as models
class Model(models.BaseModel):
name = models.fields.CharField()
class MyService(nautilus.Service):
name = 'My Awesome Service'
schema = create_model_schema(Model)
action_handler = crud_handler(Model)
"""
config = None
name = None
schema = None
action_handler = ServiceActionHandler
api_request_handler_class = GraphQLRequestHandler
_routes = []
def __init__(
self,
name=None,
schema=None,
action_handler=None,
config=None,
auth=True,
):
self.name = name or self.name or type(self).name
self.app = None
self.__name__ = self.name
self.event_broker = None
self.schema = schema or self.schema
# wrap the given configuration in the nautilus wrapper
self.config = Config(self.config, config)
# initialize the service
self.init_app()
self.init_routes()
self.init_action_handler()
# placeholders
self._http_server = None
self._server_handler = None
# cleanup
def init_app(self):
from nautilus.api.endpoints import template_dir as api_template_dir
from nautilus.auth import template_dir as auth_template_dir
# the secret key
secret_key = 'NERbTdtQl7IrBM9kx1PDjJXiyZhWWBZ9E7q2B3U7KVE='
# create a web application instance
self.app = aiohttp.web.Application(
middlewares=[
session_middleware(
EncryptedCookieStorage(secret_key, secure=True, domain='*')
)
]
)
# add the template loader
aiohttp_jinja2.setup(self.app,
loader=jinja2.ChoiceLoader([
jinja2.FileSystemLoader(api_template_dir),
jinja2.FileSystemLoader(auth_template_dir)
])
)
# TODO:
# debug mode
# attach the ioloop to the application
self.loop = asyncio.get_event_loop()
# attach the service to the loop
self.loop.service = self
async def announce(self):
"""
This method is used to announce the existence of the service
"""
# send a serialized event
await self.event_broker.send(
action_type=intialize_service_action(),
payload=json.dumps(self.summarize())
)
def summarize(self, **extra_fields):
# return the summary
return dict(name=str(self.name), **extra_fields)
def init_routes(self):
# for each route that was registered
for route in self._routes:
# add the service instance to the route handler
route['request_handler'].service = self
# add the corresponding http endpoint
self.add_http_endpoint(**route)
# add the schema reference to graphql handler
self.api_request_handler_class.service = self
# add the static file urls
self.app.router.add_static('/graphiql/static/', api_endpoint_static)
# add the default api handler
self.add_http_endpoint('/', self.api_request_handler_class)
# add the graphiql endpoint
self.add_http_endpoint('/graphiql', GraphiQLRequestHandler)
def init_action_handler(self):
# create a wrapper for it
self.event_broker = self.action_handler()
# pass the service to the event broker
self.event_broker.service = self
def run(self, host="localhost", port=8000, shutdown_timeout=60.0, **kwargs):
"""
This function starts the service's network intefaces.
Args:
port (int): The port for the http server.
"""
print("Running service on http://localhost:%i. " % port + \
"Press Ctrl+C to terminate.")
# apply the configuration to the service config
self.config.port = port
self.config.host = host
# start the loop
try:
# if an event broker has been created for this service
if self.event_broker:
# start the broker
self.event_broker.start()
# announce the service
self.loop.run_until_complete(self.announce())
# the handler for the http server
http_handler = self.app.make_handler()
# create an asyncio server
self._http_server = self.loop.create_server(http_handler, host, port)
# grab the handler for the server callback
self._server_handler = self.loop.run_until_complete(self._http_server)
# start the event loop
self.loop.run_forever()
# if the user interrupted the server
except KeyboardInterrupt:
# keep going
pass
# when we're done
finally:
try:
# clean up the service
self.cleanup()
# if we end up closing before any variables get assigned
except UnboundLocalError:
# just ignore it (there was nothing to close)
pass
# close the event loop
self.loop.close()
def cleanup(self):
"""
This function is called when the service has finished running
regardless of intentionally or not.
"""
# if an event broker has been created for this service
if self.event_broker:
# stop the event broker
self.event_broker.stop()
# attempt
try:
# close the http server
self._server_handler.close()
self.loop.run_until_complete(self._server_handler.wait_closed())
self.loop.run_until_complete(self._http_handler.finish_connections(shutdown_timeout))
# if there was no handler
except AttributeError:
# keep going
pass
# more cleanup
self.loop.run_until_complete(self.app.shutdown())
self.loop.run_until_complete(self.app.cleanup())
def add_http_endpoint(self, url, request_handler):
"""
This method provides a programatic way of added invidual routes
to the http server.
Args:
url (str): the url to be handled by the request_handler
request_handler (nautilus.network.RequestHandler): The request handler
"""
self.app.router.add_route('*', url, request_handler)
@classmethod
def route(cls, route, config=None):
"""
This method provides a decorator for adding endpoints to the
http server.
Args:
route (str): The url to be handled by the RequestHandled
config (dict): Configuration for the request handler
Example:
.. code-block:: python
import nautilus
from nauilus.network.http import RequestHandler
class MyService(nautilus.Service):
# ...
@MyService.route('/')
class HelloWorld(RequestHandler):
def get(self):
return self.finish('hello world')
"""
def decorator(wrapped_class, **kwds):
# add the endpoint at the given route
cls._routes.append(
dict(url=route, request_handler=wrapped_class)
)
# return the class undecorated
return wrapped_class
# return the decorator
return decorator
def _json(self):
# return a summary of the service
return self.summarize()
|
class Service(metaclass=ServiceMetaClass):
'''
This is the base class for all services that are part of a nautilus
cloud. This class provides basic functionalities such as registration,
responding to actions, and predictable api endpoints.
Args:
action_handler (optional, function): The callback function fired when
an action is recieved. If None, the service does not connect to the
action queue.
config (optional, class): A python class to use for configuring the
service.
name (string): The name of the service. This will be used to
register the service with the registry as act as the designator
for a ServiceObjectType.
schema (optional, graphql.core.type.GraphQLSchema): The GraphQL schema
which acts as a basis for the external API. If None, no endpoints are
added to the service.
Example:
.. code-block:: python
import nautilus
from nautilus.api.util import create_model_schema
from nautilus.network import crud_handler
import nautilus.models as models
class Model(models.BaseModel):
name = models.fields.CharField()
class MyService(nautilus.Service):
name = 'My Awesome Service'
schema = create_model_schema(Model)
action_handler = crud_handler(Model)
'''
def __init__(
self,
name=None,
schema=None,
action_handler=None,
config=None,
auth=True,
):
pass
def init_app(self):
pass
async def announce(self):
'''
This method is used to announce the existence of the service
'''
pass
def summarize(self, **extra_fields):
pass
def init_routes(self):
pass
def init_action_handler(self):
pass
def run(self, host="localhost", port=8000, shutdown_timeout=60.0, **kwargs):
'''
This function starts the service's network intefaces.
Args:
port (int): The port for the http server.
'''
pass
def cleanup(self):
'''
This function is called when the service has finished running
regardless of intentionally or not.
'''
pass
def add_http_endpoint(self, url, request_handler):
'''
This method provides a programatic way of added invidual routes
to the http server.
Args:
url (str): the url to be handled by the request_handler
request_handler (nautilus.network.RequestHandler): The request handler
'''
pass
@classmethod
def route(cls, route, config=None):
'''
This method provides a decorator for adding endpoints to the
http server.
Args:
route (str): The url to be handled by the RequestHandled
config (dict): Configuration for the request handler
Example:
.. code-block:: python
import nautilus
from nauilus.network.http import RequestHandler
class MyService(nautilus.Service):
# ...
@MyService.route('/')
class HelloWorld(RequestHandler):
def get(self):
return self.finish('hello world')
'''
pass
def decorator(wrapped_class, **kwds):
pass
def _json(self):
pass
| 14 | 6 | 18 | 2 | 9 | 7 | 2 | 1.07 | 1 | 6 | 1 | 2 | 10 | 6 | 11 | 25 | 283 | 62 | 107 | 38 | 84 | 114 | 81 | 30 | 66 | 4 | 3 | 2 | 18 |
3,666 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/services/modelService.py
|
nautilus.services.modelService.ModelService
|
class ModelService(Service):
"""
This service acts as the primary data store in your cloud. It manages
the records of a single model by listening for actions that indicate
a record mutation as well as emitting actions when the mutations have
finished (whether successfully or not). The external API is
automatically generated to match the given model.
Args:
model (nautilus.BaseModel): The nautilus model to manage.
Example:
.. code-block:: python
import nautilus
import nautilus.models as models
class Model(models.BaseModel):
name = models.fields.CharField()
class ServiceConfig:
database_url = 'sqlite:////tmp/models.db'
class MyModelService(nautilus.ModelService):
model = Model
config = ServiceConfig
"""
model = None
def __new__(cls, *args, **kwds):
# make sure the service has the right name
cls.name = model_service_name(cls.model) if cls.model else ''
# bubble up
return super().__new__(cls, *args)
def __init__(self, model=None, **kwargs):
# avoid circular depdencies
from ..api.util import create_model_schema
# if we were given a model for the service
if model:
# use the given model
self.model = model
# if there is no model associated with this service
if not self.model:
# yell loudly
raise ValueError("Please provide a model for the model service.")
# pull the name of the service from kwargs if it was given
name = kwargs.pop('name', None) or model_service_name(self.model)
# create the service
super().__init__(
schema=create_model_schema(self.model),
name=name,
**kwargs
)
# initialize the database
self.init_db()
@property
def action_handler(self):
# create a crud handler for the model
model_handler = crud_handler(self.model, name=self.name)
class ModelActionHandler(super().action_handler):
loop = self.loop
async def handle_action(inner_self, action_type, payload, props, **kwds):
"""
The default action handler for a model service call
"""
# bubble up
response = await super(ModelActionHandler, inner_self).handle_action(action_type=action_type, payload=payload, props=props,**kwds)
# call the crud handler
await model_handler(self, action_type=action_type, payload=payload, props=props,**kwds)
return ModelActionHandler
def init_db(self):
"""
This function configures the database used for models to make
the configuration parameters.
"""
# get the database url from the configuration
db_url = self.config.get('database_url', 'sqlite:///nautilus.db')
# configure the nautilus database to the url
nautilus.database.init_db(db_url)
def summarize(self, **extra_fields):
# the fields for the service's model
model_fields = {field.name: field for field in list(self.model.fields())} \
if self.model \
else {}
# add the model fields to the dictionary
return dict(
**super().summarize(),
fields=[{
'name': key,
'type': type(convert_peewee_field(value)).__name__
} for key, value in model_fields.items()
],
mutations=[
summarize_crud_mutation(model=self, method='create'),
summarize_crud_mutation(model=self, method='update'),
summarize_crud_mutation(model=self, method='delete'),
],
**extra_fields
)
def get_models(self):
"""
Returns the models managed by this service.
Returns:
(list): the models managed by the service
"""
return [self.model]
|
class ModelService(Service):
'''
This service acts as the primary data store in your cloud. It manages
the records of a single model by listening for actions that indicate
a record mutation as well as emitting actions when the mutations have
finished (whether successfully or not). The external API is
automatically generated to match the given model.
Args:
model (nautilus.BaseModel): The nautilus model to manage.
Example:
.. code-block:: python
import nautilus
import nautilus.models as models
class Model(models.BaseModel):
name = models.fields.CharField()
class ServiceConfig:
database_url = 'sqlite:////tmp/models.db'
class MyModelService(nautilus.ModelService):
model = Model
config = ServiceConfig
'''
def __new__(cls, *args, **kwds):
pass
def __init__(self, model=None, **kwargs):
pass
@property
def action_handler(self):
pass
class ModelActionHandler(super().action_handler):
async def handle_action(inner_self, action_type, payload, props, **kwds):
'''
The default action handler for a model service call
'''
pass
def init_db(self):
'''
This function configures the database used for models to make
the configuration parameters.
'''
pass
def summarize(self, **extra_fields):
pass
def get_models(self):
'''
Returns the models managed by this service.
Returns:
(list): the models managed by the service
'''
pass
| 10 | 4 | 13 | 1 | 7 | 5 | 2 | 0.98 | 1 | 5 | 1 | 1 | 6 | 0 | 6 | 31 | 132 | 33 | 50 | 18 | 39 | 49 | 30 | 17 | 20 | 3 | 4 | 1 | 11 |
3,667 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/services/connectionService.py
|
nautilus.services.connectionService.ConnectionService
|
class ConnectionService(ModelService):
"""
This service manages a "one-way" connection between two services.
The underlying schema and database are automatically generated to
match the primary keys of the linked services.
This service will listen for actions indicating the deletion of a related
model and remove any related fields to maintain consistency. And provides
a way for the api gateway to deduce the relationship between services when
summarizing the cloud.
Args:
services (list of nautilus.Service): The list of services to connect.
Example:
.. code-block:: python
# external imports
import nautilus
class ServiceConfig:
database_url = 'sqlite:////tmp/connections.db'
class MyConnection(nautilus.ConnectionService):
config = ServiceConfig
from_service = ('service_one',)
to_service = ('service_one',)
"""
from_service = None
to_service = None
def __init__(self, **kwargs):
# if there is no to service
if not self.to_service:
raise ValueError("Please provide a 'to_service'.")
# if there is no to service
if not self.from_service:
raise ValueError("Please provide a 'from_service'.")
# save a list of the models
self._services = [self.to_service[0], self.from_service[0]]
# make sure there is a unique name for every service
if len(set(self._services)) != len(self._services):
raise ValueError("Can only connect models with different name")
# create the service
super().__init__(
model=create_connection_model(self),
name=connection_service_name(self),
**kwargs
)
@property
def action_handler(self):
# create a linked handler for every service
linked_handlers = [self._create_linked_handler(service) \
for service in self._services]
class ConnectionActionHandler(super().action_handler):
async def handle_action(self, *args, **kwds):
"""
a connection service should listen for deletes on linked services
as well as the usual model service behavior
"""
# bubble up
await super().handle_action(*args, **kwds)
# for each service we care about
for handler in linked_handlers:
# call the handler
await handler(*args, **kwds)
return ConnectionActionHandler
def summarize(self, **extra_fields):
# start with the default summary
try:
return {
**super().summarize(),
'connection': {
'from': {
'service': model_service_name(self.from_service[0]),
},
'to': {
'service': model_service_name(self.to_service[0]),
}
},
**extra_fields
}
except Exception as e:
print(e)
def _create_linked_handler(self, model):
# the related action type
related_action_type = get_crud_action('delete', model, status=success_status())
# the action handler
async def action_handler(action_type, payload, notify=True, **kwds):
"""
an action handler to remove related entries in the
connection db.
"""
# if the action designates a successful delete of the model
if action_type == related_action_type:
# the id of the deleted model
related_id = payload['id']
# the query for matching fields
matching_records = getattr(self.model, model_service_name(model)) == related_id
ids = [model.id for model in self.model.filter(matching_records)]
# find the matching records
self.model.delete().where(matching_records).execute()
# if we are supposed to notify
if notify:
# notify of the related delete
await self.event_broker.send(
action_type=get_crud_action('delete', self.model, status=success_status()),
payload=ids
)
# pass the action handler
return action_handler
|
class ConnectionService(ModelService):
'''
This service manages a "one-way" connection between two services.
The underlying schema and database are automatically generated to
match the primary keys of the linked services.
This service will listen for actions indicating the deletion of a related
model and remove any related fields to maintain consistency. And provides
a way for the api gateway to deduce the relationship between services when
summarizing the cloud.
Args:
services (list of nautilus.Service): The list of services to connect.
Example:
.. code-block:: python
# external imports
import nautilus
class ServiceConfig:
database_url = 'sqlite:////tmp/connections.db'
class MyConnection(nautilus.ConnectionService):
config = ServiceConfig
from_service = ('service_one',)
to_service = ('service_one',)
'''
def __init__(self, **kwargs):
pass
@property
def action_handler(self):
pass
class ConnectionActionHandler(super().action_handler):
async def handle_action(self, *args, **kwds):
'''
a connection service should listen for deletes on linked services
as well as the usual model service behavior
'''
pass
def summarize(self, **extra_fields):
pass
def _create_linked_handler(self, model):
pass
def action_handler(self):
'''
an action handler to remove related entries in the
connection db.
'''
pass
| 9 | 3 | 21 | 2 | 11 | 7 | 2 | 0.86 | 1 | 5 | 1 | 0 | 4 | 1 | 4 | 35 | 133 | 29 | 56 | 19 | 47 | 48 | 36 | 17 | 28 | 4 | 5 | 2 | 13 |
3,668 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/services/apiGateway.py
|
nautilus.services.apiGateway.APIGateway
|
class APIGateway(Service):
"""
This provides a single endpoint that other services and clients can
use to query the cloud without worrying about the distributed nature
of the system.
Example:
.. code-block:: python
# external imports
import nautilus
# local imports
from .schema import schema
class MyAPIGateway(nautilus.APIGateway):
schema = schema
"""
name = api_gateway_name()
model = UserPassword
api_request_handler_class = api_query.APIQueryHandler
action_handler = api_handler.APIActionHandler
_external_service_data = defaultdict(list)
secret_key = None
def __init__(self, *args, **kwds):
# bubble up
super().__init__(*args, **kwds)
# attach this service to the action handler
self.action_handler.service = self
# do any sort of database setup
self.init_db()
# make sure there is a valid secret key
def init_db(self):
"""
This function configures the database used for models to make
the configuration parameters.
"""
# get the database url from the configuration
db_url = self.config.get('database_url', 'sqlite:///passwords.db')
# configure the nautilus database to the url
nautilus.database.init_db(db_url)
# when its time for the service to announce itself
async def announce(self):
# bubble up
await super().announce()
# ask for rollcall aswell
await self.event_broker.send(
action_type=roll_call_type(),
payload='please report yourself'
)
@property
def auth_criteria(self):
"""
This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements.
"""
# the dictionary we will return
auth = {}
# go over each attribute of the service
for attr in dir(self):
# make sure we could hit an infinite loop
if attr != 'auth_criteria':
# get the actual attribute
attribute = getattr(self, attr)
# if the service represents an auth criteria
if isinstance(attribute, Callable) and hasattr(attribute, '_service_auth'):
# add the criteria to the final results
auth[getattr(self, attr)._service_auth] = attribute
# return the auth mapping
return auth
def init_routes(self):
# add the cors handler
self.cors = aiohttp_cors.setup(self.app)
# for each route that was registered
for route in self._routes:
# add the corresponding http endpoint
self.add_http_endpoint(**route)
# add the schema reference to graphql handler
self.api_request_handler_class.service = self
# add a cors resource
api_resource = self.cors.add(self.app.router.add_resource("/"))
# add the root api handler
self.cors.add(
api_resource.add_route("GET", self.api_request_handler_class),
{
"": aiohttp_cors.ResourceOptions(
allow_credentials=True,
expose_headers=("X-Custom-Server-Header",),
allow_headers=("X-Requested-With", "Content-Type"),
max_age=3600,
)
}
)
# add the static file urls
self.app.router.add_static('/graphiql/static/', api_endpoint_static)
# add the graphiql endpoint
self.add_http_endpoint('/graphiql', GraphiQLRequestHandler)
async def login_user(self, password, **kwds):
"""
This function handles the registration of the given user credentials in the database
"""
# find the matching user with the given email
user_data = (await self._get_matching_user(fields=list(kwds.keys()), **kwds))['data']
try:
# look for a matching entry in the local database
passwordEntry = self.model.select().where(
self.model.user == user_data[root_query()][0]['pk']
)[0]
# if we couldn't acess the id of the result
except (KeyError, IndexError) as e:
# yell loudly
raise RuntimeError('Could not find matching registered user')
# if the given password matches the stored hash
if passwordEntry and passwordEntry.password == password:
# the remote entry for the user
user = user_data[root_query()][0]
# then return a dictionary with the user and sessionToken
return {
'user': user,
'sessionToken': self._user_session_token(user)
}
# otherwise the passwords don't match
raise RuntimeError("Incorrect credentials")
async def register_user(self, password, **kwds):
"""
This function is used to provide a sessionToken for later requests.
Args:
uid (str): The
"""
# so make one
user = await self._create_remote_user(password=password, **kwds)
# if there is no pk field
if not 'pk' in user:
# make sure the user has a pk field
user['pk'] = user['id']
# the query to find a matching query
match_query = self.model.user == user['id']
# if the user has already been registered
if self.model.select().where(match_query).count() > 0:
# yell loudly
raise RuntimeError('The user is already registered.')
# create an entry in the user password table
password = self.model(user=user['id'], password=password)
# save it to the database
password.save()
# return a dictionary with the user we created and a session token for later use
return {
'user': user,
'sessionToken': self._user_session_token(user)
}
async def object_resolver(self, object_name, fields, obey_auth=False, current_user=None, **filters):
"""
This function resolves a given object in the remote backend services
"""
try:
# check if an object with that name has been registered
registered = [model for model in self._external_service_data['models'] \
if model['name']==object_name][0]
# if there is no connection data yet
except AttributeError:
raise ValueError("No objects are registered with this schema yet.")
# if we dont recognize the model that was requested
except IndexError:
raise ValueError("Cannot query for object {} on this service.".format(object_name))
# the valid fields for this object
valid_fields = [field['name'] for field in registered['fields']]
# figure out if any invalid fields were requested
invalid_fields = [field for field in fields if field not in valid_fields]
try:
# make sure we never treat pk as invalid
invalid_fields.remove('pk')
# if they weren't asking for pk as a field
except ValueError:
pass
# if there were
if invalid_fields:
# yell loudly
raise ValueError("Cannot query for fields {!r} on {}".format(
invalid_fields, registered['name']
))
# make sure we include the id in the request
fields.append('pk')
# the query for model records
query = query_for_model(fields, **filters)
# the action type for the question
action_type = get_crud_action('read', object_name)
# query the appropriate stream for the information
response = await self.event_broker.ask(
action_type=action_type,
payload=query
)
# treat the reply like a json object
response_data = json.loads(response)
# if something went wrong
if 'errors' in response_data and response_data['errors']:
# return an empty response
raise ValueError(','.join(response_data['errors']))
# grab the valid list of matches
result = response_data['data'][root_query()]
# grab the auth handler for the object
auth_criteria = self.auth_criteria.get(object_name)
# if we care about auth requirements and there is one for this object
if obey_auth and auth_criteria:
# build a second list of authorized entries
authorized_results = []
# for each query result
for query_result in result:
# create a graph entity for the model
graph_entity = GraphEntity(self, model_type=object_name, id=query_result['pk'])
# if the auth handler passes
if await auth_criteria(model=graph_entity, user_id=current_user):
# add the result to the final list
authorized_results.append(query_result)
# overwrite the query result
result = authorized_results
# apply the auth handler to the result
return result
def user_session(self, user):
"""
This method handles what information the api gateway stores about
a particular user in their session.
"""
return {
'id': user['pk']
}
async def connection_resolver(self, connection_name, object):
try:
# grab the recorded data for this connection
expected = [ conn for conn in self._external_service_data['connections']\
if conn['name'] == connection_name][0]
# if there is no connection data yet
except AttributeError:
raise ValueError("No objects are registered with this schema yet.")
# if we dont recognize the model that was requested
except IndexError:
raise ValueError("Cannot query for {} on {}.".format(connection_name, object['name']))
# the target of the connection
to_service = expected['connection']['to']['service']
# ask for only the entries connected to the object
filters = {object['name']: object['pk']}
# the field of the connection is the model name
fields = [to_service]
# the query for model records
query = query_for_model(fields, **filters).replace("'", '"')
# the action type for the question
action_type = get_crud_action('read', connection_name)
# get the service name for the connection
response = json.loads(await self.event_broker.ask(
action_type=action_type,
payload=query
))
if 'errors' in response and response['errors']:
# return an empty response
raise ValueError(','.join(response['errors']))
# grab the ids from the response
ids = [int(entry[to_service]) for entry in response['data']['all_models']]
# the question for connected nodes
return ids, to_service
async def mutation_resolver(self, mutation_name, args, fields):
"""
the default behavior for mutations is to look up the event,
publish the correct event type with the args as the body,
and return the fields contained in the result
"""
try:
# make sure we can identify the mutation
mutation_summary = [mutation for mutation in \
self._external_service_data['mutations'] \
if mutation['name'] == mutation_name][0]
# if we couldn't get the first entry in the list
except KeyError as e:
# make sure the error is reported
raise ValueError("Could not execute mutation named: " + mutation_name)
# the function to use for running the mutation depends on its schronicity
# event_function = self.event_broker.ask \
# if mutation_summary['isAsync'] else self.event_broker.send
event_function = self.event_broker.ask
# send the event and wait for a response
value = await event_function(
action_type=mutation_summary['event'],
payload=args
)
try:
# return a dictionary with the values we asked for
return json.loads(value)
# if the result was not valid json
except json.decoder.JSONDecodeError:
# just throw the value
raise RuntimeError(value)
def get_models(self):
"""
Returns the models managed by this service.
Returns:
(list): the models managed by the service
"""
return [self.model]
## internal utilities
def _user_session_token(self, user):
# grab the session for this particular user
user_session = self.user_session(user)
# return the token signed by the services secret key
return generate_session_token(self.secret_key, **user_session)
def _read_session_token(self, token):
# make sure the token is valid while we're at it
return read_session_token(self.secret_key, token)
async def _get_matching_user(self, fields=[], **filters):
# the action type for a remote query
read_action = get_crud_action(method='read', model='user')
# the fields of the user to ask for
user_fields = ['pk'] + fields
# the query for matching entries
payload = """
query {
%s(%s) {
%s
}
}
""" % (root_query(), arg_string_from_dict(filters), '\n'.join(user_fields))
# perform the query and return the result
return json.loads(await self.event_broker.ask(
action_type=read_action,
payload=payload
))
async def _check_for_matching_user(self, **user_filters):
"""
This function checks if there is a user with the same uid in the
remote user service
Args:
**kwds : the filters of the user to check for
Returns:
(bool): wether or not there is a matching user
"""
# there is a matching user if there are no errors and no results from
user_data = self._get_matching_user(user_filters)
# return true if there were no errors and at lease one result
return not user_data['errors'] and len(user_data['data'][root_query()])
async def _create_remote_user(self, **payload):
"""
This method creates a service record in the remote user service
with the given email.
Args:
uid (str): the user identifier to create
Returns:
(dict): a summary of the user that was created
"""
# the action for reading user entries
read_action = get_crud_action(method='create', model='user')
# see if there is a matching user
user_data = await self.event_broker.ask(
action_type=read_action,
payload=payload
)
# treat the reply like a json object
return json.loads(user_data)
|
class APIGateway(Service):
'''
This provides a single endpoint that other services and clients can
use to query the cloud without worrying about the distributed nature
of the system.
Example:
.. code-block:: python
# external imports
import nautilus
# local imports
from .schema import schema
class MyAPIGateway(nautilus.APIGateway):
schema = schema
'''
def __init__(self, *args, **kwds):
pass
def init_db(self):
'''
This function configures the database used for models to make
the configuration parameters.
'''
pass
async def announce(self):
pass
@property
def auth_criteria(self):
'''
This attribute provides the mapping of services to their auth requirement
Returns:
(dict) : the mapping from services to their auth requirements.
'''
pass
def init_routes(self):
pass
async def login_user(self, password, **kwds):
'''
This function handles the registration of the given user credentials in the database
'''
pass
async def register_user(self, password, **kwds):
'''
This function is used to provide a sessionToken for later requests.
Args:
uid (str): The
'''
pass
async def object_resolver(self, object_name, fields, obey_auth=False, current_user=None, **filters):
'''
This function resolves a given object in the remote backend services
'''
pass
def user_session(self, user):
'''
This method handles what information the api gateway stores about
a particular user in their session.
'''
pass
async def connection_resolver(self, connection_name, object):
pass
async def mutation_resolver(self, mutation_name, args, fields):
'''
the default behavior for mutations is to look up the event,
publish the correct event type with the args as the body,
and return the fields contained in the result
'''
pass
def get_models(self):
'''
Returns the models managed by this service.
Returns:
(list): the models managed by the service
'''
pass
def _user_session_token(self, user):
pass
def _read_session_token(self, token):
pass
async def _get_matching_user(self, fields=[], **filters):
pass
async def _check_for_matching_user(self, **user_filters):
'''
This function checks if there is a user with the same uid in the
remote user service
Args:
**kwds : the filters of the user to check for
Returns:
(bool): wether or not there is a matching user
'''
pass
async def _create_remote_user(self, **payload):
'''
This method creates a service record in the remote user service
with the given email.
Args:
uid (str): the user identifier to create
Returns:
(dict): a summary of the user that was created
'''
pass
| 19 | 11 | 22 | 3 | 10 | 9 | 2 | 0.89 | 1 | 11 | 2 | 0 | 17 | 1 | 17 | 42 | 441 | 90 | 186 | 69 | 167 | 165 | 135 | 66 | 117 | 9 | 4 | 3 | 38 |
3,669 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/network/http/requestHandler.py
|
nautilus.network.http.requestHandler.RequestHandler
|
class RequestHandler(web.View):
"""
The base class for nautilus http request handlers.
Example:
import nautilus
from nautilus.network.http import RequestHandler, Response
class MyService(nautilus.Service): pass
@MyService.route('/')
class MyRequestHandler(RequestHandler):
async def get(self):
self.finish('hello')
"""
async def post(self):
# self.check_xsrf_cookie()
pass
async def options(self):
return web.Response(status=204, body=b'')
|
class RequestHandler(web.View):
'''
The base class for nautilus http request handlers.
Example:
import nautilus
from nautilus.network.http import RequestHandler, Response
class MyService(nautilus.Service): pass
@MyService.route('/')
class MyRequestHandler(RequestHandler):
async def get(self):
self.finish('hello')
'''
async def post(self):
pass
async def options(self):
pass
| 3 | 1 | 3 | 0 | 2 | 1 | 1 | 2.4 | 0 | 0 | 0 | 1 | 2 | 0 | 2 | 2 | 23 | 6 | 5 | 3 | 2 | 12 | 5 | 3 | 2 | 1 | 0 | 0 | 2 |
3,670 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/network/events/consumers/api.py
|
nautilus.network.events.consumers.api.APIActionHandler
|
class APIActionHandler(ActionHandler):
"""
This action handler is used by the api service to build a schema
of the underlying services as they announce their existence over
the action system.
"""
consumer_pattern = '(.*\..*\.(?!(pending)))|init|query'
async def handle_action(self, *args, **kwds):
# the combined handler
handler = combine_action_handlers(
# handle event-based queries
# query_handler,
# build the schema of possible services
flexible_api_handler
)
# pass the arguments to the combination handler
await handler(self.service, *args, **kwds)
|
class APIActionHandler(ActionHandler):
'''
This action handler is used by the api service to build a schema
of the underlying services as they announce their existence over
the action system.
'''
async def handle_action(self, *args, **kwds):
pass
| 2 | 1 | 12 | 2 | 5 | 5 | 1 | 1.43 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 10 | 21 | 4 | 7 | 4 | 5 | 10 | 5 | 4 | 3 | 1 | 2 | 0 | 1 |
3,671 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/models/serializers/modelSerializer.py
|
nautilus.models.serializers.modelSerializer.ModelSerializer
|
class ModelSerializer(JSONEncoder):
"""
This encoder serializes nautilus models to JSON
"""
def default(self, obj):
try:
# use the custom json handler
return obj._json()
# if the custom json handler doesn't exist
except AttributeError:
# perform the normal behavior
return JSONEncoder.default(self, obj)
def serialize(self, obj):
"""
This function performs the serialization on the given object.
"""
return self.encode(obj)
|
class ModelSerializer(JSONEncoder):
'''
This encoder serializes nautilus models to JSON
'''
def default(self, obj):
pass
def serialize(self, obj):
'''
This function performs the serialization on the given object.
'''
pass
| 3 | 2 | 7 | 1 | 4 | 3 | 2 | 1.13 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 6 | 20 | 3 | 8 | 3 | 5 | 9 | 8 | 3 | 5 | 2 | 2 | 1 | 3 |
3,672 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/auth/primitives/passwordHash.py
|
nautilus.auth.primitives.passwordHash.PasswordHash
|
class PasswordHash:
""" This is a wrapper class over password hashes that abstracts equality """
def __init__(self, hash_, rounds=None):
# make sure the hash is valid
if len(hash_) != 60:
raise ValueError('bcrypt hash should be 60 chars.')
elif hash_.count('$'.encode('utf-8')) != 3:
raise ValueError('bcrypt hash should have 3x "$".')
# save the required instance variables
self.hash = hash_
# figure out the current strength based on the saved hash
self.rounds = int(str(self.hash).split('$')[2])
# the intended number of rounds (in case there is an upgrade)
self.desired_rounds = rounds or self.rounds
# this allows us to easily check if a candidate password matches the hash
# using: hash == 'foo'
def __eq__(self, candidate):
"""Hashes the candidate string and compares it to the stored hash."""
if isinstance(candidate, str):
# convert it to a byte string
candidate = candidate.encode('utf-8')
# if the candidate matches the saved hash
if self.hash == bcrypt.hashpw(candidate, self.hash):
# if the computed number of rounds is less than the designated one
if self.rounds < self.desired_rounds:
# rehash the password
self.rehash(candidate)
return True
# otherwise the password doesn't match
else:
return False
def __repr__(self):
"""Simple object representation."""
return '<{}: {}>'.format(type(self).__name__, self.hash)
@classmethod
def new(cls, password, rounds):
"""Creates a PasswordHash from the given password."""
if isinstance(password, str):
password = password.encode('utf-8')
return cls(cls._new(password, rounds))
@classmethod
def coerce(cls, key, value):
"""Ensure that loaded values are PasswordHashes."""
if isinstance(value, PasswordHash):
return value
return super(PasswordHash, cls).coerce(key, value)
@staticmethod
def _new(password, rounds):
"""
Returns a new bcrypt hash for the given password and rounds.
note: Implemented to reduce repitition in `new` and `rehash`.
"""
return bcrypt.hashpw(password, bcrypt.gensalt(rounds))
def rehash(self, password):
"""Recreates the internal hash."""
self.hash = self._new(password, self.desired_rounds)
self.rounds = self.desired_rounds
|
class PasswordHash:
''' This is a wrapper class over password hashes that abstracts equality '''
def __init__(self, hash_, rounds=None):
pass
def __eq__(self, candidate):
'''Hashes the candidate string and compares it to the stored hash.'''
pass
def __repr__(self):
'''Simple object representation.'''
pass
@classmethod
def new(cls, password, rounds):
'''Creates a PasswordHash from the given password.'''
pass
@classmethod
def coerce(cls, key, value):
'''Ensure that loaded values are PasswordHashes.'''
pass
@staticmethod
def _new(password, rounds):
'''
Returns a new bcrypt hash for the given password and rounds.
note: Implemented to reduce repitition in `new` and `rehash`.
'''
pass
def rehash(self, password):
'''Recreates the internal hash.'''
pass
| 11 | 7 | 8 | 1 | 5 | 3 | 2 | 0.58 | 0 | 5 | 0 | 0 | 4 | 3 | 7 | 7 | 74 | 17 | 36 | 14 | 25 | 21 | 31 | 11 | 23 | 4 | 0 | 2 | 14 |
3,673 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/config/config.py
|
nautilus.config.config.Config
|
class Config(dict):
"""
This class creates a general api for configuration.
"""
def __init__(self, *args, **kwds):
# start off with the given values
values = kwds
# for each argument passed
for arg in args:
# if the argument is a dictionary
if isinstance(arg, dict):
values.update(arg)
# otherwise if the argument is a class record
if isinstance(arg, type):
values.update(self._from_type(arg))
# use the kwds as keys
self.update(kwds)
def __getattr__(self, attr):
"""
This method allows the retrieval of internal keys like attributes
"""
# access the dictionary for attributes
return self[attr]
def __setattr__(self, attr, value):
"""
This method allows the setting of internal keys like attributes
"""
# update the internal structure
self[attr] = value
def _from_type(self, config):
"""
This method converts a type into a dict.
"""
def is_user_attribute(attr):
return (
not attr.startswith('__') and
not isinstance(getattr(config, attr), collections.abc.Callable)
)
return {attr: getattr(config, attr) for attr in dir(config) \
if is_user_attribute(attr)}
|
class Config(dict):
'''
This class creates a general api for configuration.
'''
def __init__(self, *args, **kwds):
pass
def __getattr__(self, attr):
'''
This method allows the retrieval of internal keys like attributes
'''
pass
def __setattr__(self, attr, value):
'''
This method allows the setting of internal keys like attributes
'''
pass
def _from_type(self, config):
'''
This method converts a type into a dict.
'''
pass
def is_user_attribute(attr):
pass
| 6 | 4 | 9 | 1 | 5 | 3 | 2 | 0.9 | 1 | 1 | 0 | 0 | 4 | 0 | 4 | 31 | 50 | 10 | 21 | 8 | 15 | 19 | 17 | 8 | 11 | 4 | 2 | 2 | 8 |
3,674 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/services/service.py
|
nautilus.services.service.ServiceActionHandler
|
class ServiceActionHandler(ActionHandler):
async def handle_action(self, action_type, payload, **kwds):
"""
The default action Handler has no action.
"""
# if there is a service attached to the action handler
if hasattr(self, 'service'):
# handle roll calls
await roll_call_handler(self.service, action_type, payload, **kwds)
|
class ServiceActionHandler(ActionHandler):
async def handle_action(self, action_type, payload, **kwds):
'''
The default action Handler has no action.
'''
pass
| 2 | 1 | 8 | 0 | 3 | 5 | 2 | 1.25 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 10 | 10 | 1 | 4 | 2 | 2 | 5 | 4 | 2 | 2 | 2 | 2 | 1 | 2 |
3,675 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/management/test_scripts.py
|
tests.management.test_scripts.TestUtil
|
class TestUtil(unittest.TestCase):
def setUp(self):
# make a temporary directory
self.tempdir = tempfile.mkdtemp()
# save the current working directory
self.cwd = os.getcwd()
# change the current working directory to the temporary directory
os.chdir(self.tempdir)
def tearDown(self):
# change the cwd back
os.chdir(self.cwd)
# remove the temporary directory
shutil.rmtree(self.tempdir)
def test_can_create_model_service(self):
# import the model service creation script
from nautilus.management.scripts.create import model
# create a model
model.callback('foo')
def test_can_create_connection_service(self):
# import the model service creation script
from nautilus.management.scripts.create import connection
# create a model
connection.callback(['foo:bar'])
def test_can_create_api(self):
# import the model service creation script
from nautilus.management.scripts.create import api
# create a model
api.callback()
|
class TestUtil(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_can_create_model_service(self):
pass
def test_can_create_connection_service(self):
pass
def test_can_create_api(self):
pass
| 6 | 0 | 5 | 0 | 3 | 2 | 1 | 0.65 | 1 | 0 | 0 | 0 | 5 | 2 | 5 | 77 | 36 | 8 | 17 | 11 | 8 | 11 | 17 | 11 | 8 | 1 | 2 | 0 | 5 |
3,676 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/models/base.py
|
nautilus.models.base._Meta
|
class _Meta(type):
"""
The base metaclass for the nautilus models.
"""
def __init__(self, name, bases, attributes):
# create the super class
super().__init__(name, bases, attributes)
# for each base we inherit from
for base in bases:
# if the base defines some mixin behavior
if hasattr(base, '__mixin__'):
# treat the base like a mixin
base.__mixin__(self)
# save the name in the class
self.model_name = name
|
class _Meta(type):
'''
The base metaclass for the nautilus models.
'''
def __init__(self, name, bases, attributes):
pass
| 2 | 1 | 12 | 1 | 6 | 5 | 3 | 1.14 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 14 | 17 | 2 | 7 | 4 | 5 | 8 | 7 | 4 | 5 | 3 | 2 | 2 | 3 |
3,677 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/models/base.py
|
nautilus.models.base.BaseModel
|
class BaseModel(peewee.Model, metaclass=_MixedMeta):
class Meta:
database = db
def _json(self):
# build a dictionary out of just the columns in the table
return {
field.name: getattr(self, field.name) \
for field in type(self).fields()
}
@classmethod
def primary_key(cls):
"""
Retrieve the primary key of the database table.
"""
return cls._meta.primary_key
@classmethod
def required_fields(cls):
"""
Retrieve the required fields for this model.
"""
return [field for field in cls.fields() if not field.null]
@classmethod
def fields(cls):
"""
Returns the fields of the table.
"""
return cls._meta.fields.values()
|
class BaseModel(peewee.Model, metaclass=_MixedMeta):
class Meta:
def _json(self):
pass
@classmethod
def primary_key(cls):
'''
Retrieve the primary key of the database table.
'''
pass
@classmethod
def required_fields(cls):
'''
Retrieve the required fields for this model.
'''
pass
@classmethod
def fields(cls):
'''
Returns the fields of the table.
'''
pass
| 9 | 3 | 5 | 0 | 3 | 3 | 1 | 0.59 | 2 | 0 | 0 | 0 | 1 | 0 | 4 | 18 | 36 | 9 | 17 | 10 | 8 | 10 | 11 | 7 | 5 | 1 | 4 | 0 | 4 |
3,678 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/contrib/graphene_peewee/objectType.py
|
nautilus.contrib.graphene_peewee.objectType.PeeweeObjectTypeOptions
|
class PeeweeObjectTypeOptions(ObjectTypeOptions):
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
self.valid_attrs += VALID_ATTRS
self.model = None
def contribute_to_class(self, cls, name):
# bubble up the chain
super().contribute_to_class(cls, name)
# add the model to the class record
cls.model = self.model
|
class PeeweeObjectTypeOptions(ObjectTypeOptions):
def __init__(self, *args, **kwds):
pass
def contribute_to_class(self, cls, name):
pass
| 3 | 0 | 5 | 0 | 4 | 1 | 1 | 0.25 | 1 | 1 | 0 | 0 | 2 | 1 | 2 | 2 | 12 | 2 | 8 | 4 | 5 | 2 | 8 | 4 | 5 | 1 | 1 | 0 | 2 |
3,679 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/contrib/graphene_peewee/objectType.py
|
nautilus.contrib.graphene_peewee.objectType.PeeweeObjectTypeMeta
|
class PeeweeObjectTypeMeta(type(ObjectType)):
options_class = PeeweeObjectTypeOptions
def construct(self, *args, **kwds):
# pass the model to the class record
self.model = self._meta.model
# return the full class record
return super().construct(*args, **kwds)
def __new__(cls, name, bases, attributes, **kwds):
full_attr = {}
try:
# for each field in the table
for field in attributes['Meta'].model.fields():
# the name of the field in the schema
field_name = field.name[0].lower() + field.name[1:]
# add an entry for the field we were passed
full_attr[field_name] = convert_peewee_field(field)
# if there is no meta type defined
except KeyError:
# keep going
pass
# if there is no model defined
except AttributeError:
# yell loudly
raise ValueError("PeeweeObjectsTypes must have a model.")
# merge the given attributes ontop of the dynamic ones
full_attr.update(attributes)
# create the nex class records
return super().__new__(cls, name, bases, full_attr, **kwds)
|
class PeeweeObjectTypeMeta(type(ObjectType)):
def construct(self, *args, **kwds):
pass
def __new__(cls, name, bases, attributes, **kwds):
pass
| 3 | 0 | 16 | 3 | 8 | 6 | 3 | 0.65 | 1 | 4 | 0 | 1 | 2 | 1 | 2 | 15 | 37 | 9 | 17 | 8 | 14 | 11 | 17 | 8 | 14 | 4 | 2 | 2 | 5 |
3,680 |
AlecAivazis/graphql-over-kafka
|
/Users/umroot/Documents/PhD_works/PhD-Core-Contents/Class-level-dataset-curation/data/git_repos_for_analysis/AlecAivazis_graphql-over-kafka/nautilus/api/endpoints/requestHandlers/graphiql.py
|
nautilus.api.endpoints.requestHandlers.graphiql.GraphiQLRequestHandler
|
class GraphiQLRequestHandler(RequestHandler):
@aiohttp_jinja2.template('graphiql.html')
async def get(self):
# write the template to the client
return {}
|
class GraphiQLRequestHandler(RequestHandler):
@aiohttp_jinja2.template('graphiql.html')
async def get(self):
pass
| 3 | 0 | 3 | 0 | 2 | 1 | 1 | 0.25 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 6 | 1 | 4 | 3 | 1 | 1 | 3 | 2 | 1 | 1 | 1 | 0 | 1 |
3,681 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/services/service.py
|
nautilus.services.service.ServiceMetaClass
|
class ServiceMetaClass(type):
def __init__(cls, name, bases, attributes):
from nautilus.conventions.models import normalize_string
# create the super class
super().__init__(name, bases, attributes)
# the base service strings
base_strings = [normalize_string(name) for name in [
'service',
'modelService',
'ConnectionService'
]]
# if the object does not yet have a name
if not cls.name or cls.name in base_strings:
# use the name of the class record
cls.name = normalize_string(name)
|
class ServiceMetaClass(type):
def __init__(cls, name, bases, attributes):
pass
| 2 | 0 | 17 | 3 | 10 | 4 | 2 | 0.36 | 1 | 1 | 0 | 1 | 1 | 0 | 1 | 14 | 18 | 3 | 11 | 4 | 8 | 4 | 7 | 4 | 4 | 2 | 2 | 1 | 2 |
3,682 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/example/recipes.py
|
recipes.ServiceConfig
|
class ServiceConfig:
database_url = 'sqlite:///recipes.db'
|
class ServiceConfig:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
3,683 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/example/recipeIngredients.py
|
recipeIngredients.ServiceConfig
|
class ServiceConfig:
database_url = 'sqlite:///ingredientRecipeConnections.db'
|
class ServiceConfig:
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 0 | 2 | 2 | 1 | 0 | 0 | 0 | 0 |
3,684 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/conventions/test_services.py
|
tests.conventions.test_services.TestUtil
|
class TestUtil(unittest.TestCase):
"""
This test suite looks at the various utilities for manipulating
models.
"""
def test_model_service_name(self):
# a model to test with
class TestModel(nautilus.models.BaseModel):
name = nautilus.models.fields.CharField()
TestModel = MockModel()
# generate a service from the model
class TestService(nautilus.ModelService):
model = TestModel
# make sure we could generate a name for it
assert isinstance(conventions.model_service_name(TestService), str), (
"Could not generate name for model service"
)
def test_model_service_name_accepts_numbers(self):
# a model to test with
class TestModel(nautilus.models.BaseModel):
name = nautilus.models.fields.CharField()
# generate a service from the model
class TestService1(nautilus.ModelService):
model = TestModel
# figure out the conventional name for the service
service_name = conventions.model_service_name(TestService1)
# make sure we could generate a name for it
assert (
isinstance(service_name, str) and
'1' in service_name
), (
"Could not generate name for model service when it has a number."
)
def test_auth_service_name(self):
# make sure we can generate a name for the auth service
assert isinstance(conventions.auth_service_name(), str), (
"Could not generate name for auth service."
)
def test_api_gateway_name(self):
# make sure we can generate a name for the auth service
assert isinstance(conventions.api_gateway_name(), str), (
"Could not generate name for auth service."
)
def test_connection_service_name(self):
# two models to test
# a connection service for both
class Connection(nautilus.ConnectionService):
to_service = ('TestService1',)
from_service = ('TestService2',)
# make sure we could make a name
assert isinstance(conventions.connection_service_name(Connection()), str), (
"Could not generate name for connection service"
)
|
class TestUtil(unittest.TestCase):
'''
This test suite looks at the various utilities for manipulating
models.
'''
def test_model_service_name(self):
pass
class TestModel(nautilus.models.BaseModel):
class TestService(nautilus.ModelService):
def test_model_service_name_accepts_numbers(self):
pass
class TestModel(nautilus.models.BaseModel):
class TestService1(nautilus.ModelService):
def test_auth_service_name(self):
pass
def test_api_gateway_name(self):
pass
def test_connection_service_name(self):
pass
class Connection(nautilus.ConnectionService):
| 11 | 1 | 10 | 1 | 7 | 2 | 1 | 0.43 | 1 | 5 | 4 | 0 | 5 | 0 | 5 | 77 | 64 | 11 | 37 | 18 | 26 | 16 | 24 | 18 | 13 | 1 | 2 | 0 | 5 |
3,685 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/conventions/test_models.py
|
tests.conventions.test_models.TestUtil
|
class TestUtil(unittest.TestCase):
"""
This test suite looks at the various utilities for manipulating
models.
"""
def test_model_string(self):
model = MockModel()
# save the model to the test suite
assert isinstance(get_model_string(model), str), (
"Could not generate string for model"
)
def test_normalize_string_handles_ClassCase(self):
string = 'FooBar'
assert normalize_string(string) == 'fooBar', (
"ClassCase string could not be normalized"
)
|
class TestUtil(unittest.TestCase):
'''
This test suite looks at the various utilities for manipulating
models.
'''
def test_model_string(self):
pass
def test_normalize_string_handles_ClassCase(self):
pass
| 3 | 1 | 7 | 2 | 5 | 1 | 1 | 0.45 | 1 | 1 | 0 | 0 | 2 | 0 | 2 | 74 | 21 | 5 | 11 | 5 | 8 | 5 | 7 | 5 | 4 | 1 | 2 | 0 | 2 |
3,686 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/conventions/test_auth.py
|
tests.conventions.test_auth.TestUtil
|
class TestUtil(unittest.TestCase):
"""
This test suite looks at the various utilities for manipulating
models.
"""
def test_cookie_name(self):
# save the model to the test suite
assert isinstance(cookie_name(), str), (
"Could not generate string for model"
)
|
class TestUtil(unittest.TestCase):
'''
This test suite looks at the various utilities for manipulating
models.
'''
def test_cookie_name(self):
pass
| 2 | 1 | 6 | 1 | 4 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 1 | 73 | 12 | 2 | 5 | 2 | 3 | 5 | 3 | 2 | 1 | 1 | 2 | 0 | 1 |
3,687 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/conventions/test_api.py
|
tests.conventions.test_api.TestUtil
|
class TestUtil(unittest.TestCase):
"""
This test suite looks at the various utilities for manipulating
models.
"""
def setUp(self):
# create a mock model service
self.model_service = MockModelService()
def test_root_query(self):
# import the utility
from nautilus.conventions.api import root_query
# save the model to the test suite
assert isinstance(root_query(), str), (
"Could not a root query string for schema"
)
def test_crud_mutation_name(self):
# import the utility
from nautilus.conventions.api import crud_mutation_name
# make sure we can generate a mutation name, and that it's a string
assert isinstance(crud_mutation_name(self.model_service, 'create'), str), (
"Could not generate string name for model service mutation"
)
def test_create_mutation_inputs(self):
# create the list of inputs
inputs = create_mutation_inputs(self.model_service)
# make sure the inputs match the model
from nautilus.api.util import summarize_mutation_io
# the dictionary of fields corresponding to the service record
field_dict = self.model_service.model._meta.fields
# the expected values
expected = [summarize_mutation_io(name=key, type=_graphql_type_string(value), required=(not value.null)) \
for key,value in field_dict.items()]
# make sure the pk isn't required
expected.remove([field for field in expected if field['name'] == 'id'][0])
assert inputs == expected, (
"Create mutation inputs did not match expecttations"
)
def test_create_mutation_outputs(self):
# create the list of inputs
outputs = create_mutation_outputs(self.model_service)
# the output of a create mutation should be the object corresponding
# to the model created
assert outputs == [_summarize_o_mutation_type(self.model_service.model)], (
"Create mutation output was not correct."
)
def test_update_mutation_inputs(self):
# create the list of inputs
inputs = update_mutation_inputs(self.model_service)
# the inputs of an update mutation should be the fieldsof the object
# no required args except pk to identify the target
# make sure the inputs match the model
from nautilus.api.util import summarize_mutation_io
# the dictionary of fields corresponding to the service record
field_dict = self.model_service.model._meta.fields
# the expected values
expected = [summarize_mutation_io(name=key, type=_graphql_type_string(value), required=(not value.null)) \
for key,value in field_dict.items()]
# make sure only the pk is required
for field in expected:
if field['name'] == 'id':
field['required'] = True
else:
field['required'] = False
assert inputs == expected, (
"Update mutation inputs did not match expecttations"
)
def test_update_mutation_outputs(self):
# create the list of inputs
inputs = update_mutation_outputs(self.model_service)
# the output of an update mutation should be a graphql object corresponding
# to the newly updated object
assert inputs == [_summarize_o_mutation_type(self.model_service.model)], (
"Update mutation output was not correct."
)
def test_delete_mutation_inputs(self):
# the input of a delete mutation is just the pk of the model
from nautilus.api.util import summarize_mutation_io
# create the list of inputs
inputs = delete_mutation_inputs(self.model_service)
# the only input for delete events is the pk of the service record
expected = [summarize_mutation_io(name='pk', type='ID', required=True)]
# make sure the result matches what we expected
assert inputs == expected, (
"Delete mutation inputs were incorrect"
)
def test_delete_mutation_outputs(self):
# delete the list of inputs
inputs = delete_mutation_outputs(self.model_service)
# the output of a delete mutation is a status message indicating wether or
# not the mutation was successful
from nautilus.api.util import summarize_mutation_io
# the only input for delete events is the pk of the service record
expected = [summarize_mutation_io(name='status', type='String', required=True)]
# make sure the result matches what we expected
assert inputs == expected, (
"Delete mutation outputs were incorrect"
)
def test__summarize_object_type(self):
from nautilus.api.util import summarize_mutation_io
# summarize the model of the test service
summarized = _summarize_object_type(self.model_service.model)
target = {
'fields': [
{'type': 'String', 'name': 'date'},
{'type': 'String', 'name': 'name'},
{'type': 'ID', 'name': 'id'}
]
}
assert sort_dict(target['fields']) == sort_dict(summarized['fields']), (
"Internal summary utility did not return the right object"
)
|
class TestUtil(unittest.TestCase):
'''
This test suite looks at the various utilities for manipulating
models.
'''
def setUp(self):
pass
def test_root_query(self):
pass
def test_crud_mutation_name(self):
pass
def test_create_mutation_inputs(self):
pass
def test_create_mutation_outputs(self):
pass
def test_update_mutation_inputs(self):
pass
def test_update_mutation_outputs(self):
pass
def test_delete_mutation_inputs(self):
pass
def test_delete_mutation_outputs(self):
pass
def test__summarize_object_type(self):
pass
| 11 | 1 | 13 | 2 | 7 | 3 | 1 | 0.5 | 1 | 1 | 0 | 0 | 10 | 1 | 10 | 82 | 151 | 40 | 74 | 34 | 56 | 37 | 47 | 34 | 29 | 3 | 2 | 2 | 12 |
3,688 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/conventions/test_action_types.py
|
tests.conventions.test_action_types.TestUtil
|
class TestUtil(unittest.TestCase):
"""
This test suite looks at the various utilities for manipulating
action types.
"""
def setUp(self):
# save the model to the test suite
self.model = MockModel()
def test_can_generate_crud_action_types(self):
# try to creat a crud action for a model
action_type = get_crud_action(
method='create',
model=self.model
)
assert action_type == 'create.testModel.pending', (
"New action type did not have the correct form."
)
def test_can_change_action_types(self):
# try to creat a crud action for a model
action_type = get_crud_action(method='create', model=self.model)
# try to change the action
success_action = change_action_status(action_type, 'success')
# make sure the new action type is in the result
assert success_action == 'create.testModel.success', (
"New action type did not have the correct form."
)
def test_can_generate_init_action_for_service(self):
# verify we get a string back
assert isinstance(intialize_service_action(), str)
def test_can_generate_init_action_catchall(self):
# create the action
action = intialize_service_action(all_services=True)
# verify we got a string back
assert isinstance(action, str) and '*' in action
def test_can_serialize_and_deserialize_action(self):
from nautilus.conventions.actions import serialize_action, hydrate_action
# the target
target = dict(
action_type='hello',
payload='world'
)
# the hydrated form of the object
serialized = serialize_action(**target)
# make sure we can hydrate the hydrated form into the target
assert hydrate_action(serialized) == target, (
"Could not serialize/deserialize action."
)
def test_can_serialize_and_deserialize_action_with_extra_fields(self):
from nautilus.conventions.actions import serialize_action, hydrate_action
# the target
target = dict(
foo='bar',
action_type='hello',
payload='world'
)
# the hydrated form of the object
serialized = serialize_action(**target)
# make sure we can hydrate the hydrated form into the target
assert hydrate_action(serialized) == target, (
"Could not serialize action with extra fields."
)
def test_can_hydrate_extra_fields(self):
from nautilus.conventions.actions import serialize_action, hydrate_action
# the target
target = dict(action_type='foo', payload='bar', foo='bar')
# the serialized form of the object
serialized = serialize_action(**target)
# make sure we can hydrate the serialized form into the target
assert hydrate_action(serialized) == target, (
"Could not hydrate action with extra fields."
)
def test_query_action(self):
# create the default action type
action_type = query_action_type()
# make sure we got a string back
assert isinstance(action_type, str)
def test_has_success_status(self):
# create the success status
status = success_status()
# make sure its a string
assert isinstance(status, str)
def test_has_error_status(self):
# create the error status
status = error_status()
# make sure its a string
assert isinstance(status, str)
def test_has_pending_status(self):
# create the pending status
status = pending_status()
# make sure its a string
assert isinstance(status, str)
|
class TestUtil(unittest.TestCase):
'''
This test suite looks at the various utilities for manipulating
action types.
'''
def setUp(self):
pass
def test_can_generate_crud_action_types(self):
pass
def test_can_change_action_types(self):
pass
def test_can_generate_init_action_for_service(self):
pass
def test_can_generate_init_action_catchall(self):
pass
def test_can_serialize_and_deserialize_action(self):
pass
def test_can_serialize_and_deserialize_action_with_extra_fields(self):
pass
def test_can_hydrate_extra_fields(self):
pass
def test_query_action(self):
pass
def test_has_success_status(self):
pass
def test_has_error_status(self):
pass
def test_has_pending_status(self):
pass
| 13 | 1 | 7 | 0 | 5 | 2 | 1 | 0.47 | 1 | 2 | 0 | 0 | 12 | 1 | 12 | 84 | 116 | 25 | 62 | 31 | 46 | 29 | 42 | 31 | 26 | 1 | 2 | 0 | 12 |
3,689 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/contrib/graphene_peewee/test_objecttype.py
|
tests.contrib.graphene_peewee.test_objecttype.TestUtil
|
class TestUtil(unittest.TestCase):
def setUp(self):
# the base model to test
TestModel = MockModel()
# the object type based on the models
class TestObjectType(PeeweeObjectType):
class Meta:
model = TestModel
# save the mocks to the test case
self.model = TestModel
self.object_type = TestObjectType
def test_generated_object_has_model_fields(self):
# the list of fields in the service object
service_object_fields = {field.default_name \
for field in self.object_type._meta.fields}
# the list of fields in the models
model_fields = {field.name for field in self.model.fields()}
# make sure the two lists are the same
assert model_fields == service_object_fields, (
"PeeweeObjectType does not have the same fields as the model"
)
|
class TestUtil(unittest.TestCase):
def setUp(self):
pass
class TestObjectType(PeeweeObjectType):
class Meta:
def test_generated_object_has_model_fields(self):
pass
| 5 | 0 | 12 | 2 | 7 | 3 | 1 | 0.4 | 1 | 1 | 1 | 0 | 2 | 2 | 2 | 74 | 27 | 6 | 15 | 11 | 10 | 6 | 12 | 11 | 7 | 1 | 2 | 0 | 2 |
3,690 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/contrib/graphene_peewee/test_converter.py
|
tests.contrib.graphene_peewee.test_converter.TestUtil
|
class TestUtil(unittest.TestCase):
def assert_field_converted(self, nautilus_field, graphene_field):
# convert the nautilus field to the corresponding graphene type
test_graphene_type = convert_peewee_field(nautilus_field)
# make sure the converted type matches the graphene field
assert isinstance(test_graphene_type, graphene_field), (
"nautilus field was not properly coverted to %s" % graphene_field.__class__
)
def test_can_convert_BigIntegerField(self):
self.assert_field_converted(nautilus.BigIntegerField(), graphene.Int)
def test_can_convert_BooleanField(self):
self.assert_field_converted(nautilus.BooleanField(), graphene.Boolean)
def test_can_convert_CharField(self):
self.assert_field_converted(nautilus.CharField(), graphene.String)
def test_can_convert_DateField(self):
self.assert_field_converted(nautilus.DateField(), graphene.String)
def test_can_convert_DateTimeField(self):
self.assert_field_converted(nautilus.DateTimeField(), graphene.String)
def test_can_convert_DecimalField(self):
self.assert_field_converted(nautilus.DecimalField(), graphene.Float)
def test_can_convert_DoubleField(self):
self.assert_field_converted(nautilus.DoubleField(), graphene.Float)
def test_can_convert_FixedCharField(self):
self.assert_field_converted(nautilus.FixedCharField(), graphene.String)
def test_can_convert_FloatField(self):
self.assert_field_converted(nautilus.FloatField(), graphene.Float)
def test_can_convert_IntegerField(self):
self.assert_field_converted(nautilus.IntegerField(), graphene.Int)
def test_can_convert_PrimaryKeyField(self):
self.assert_field_converted(nautilus.PrimaryKeyField(), graphene.ID)
def test_can_convert_TextField(self):
self.assert_field_converted(nautilus.TextField(), graphene.String)
def test_can_convert_TimeField(self):
self.assert_field_converted(nautilus.TimeField(), graphene.String)
def test_can_convert_UUIDField(self):
self.assert_field_converted(nautilus.UUIDField(), graphene.String)
|
class TestUtil(unittest.TestCase):
def assert_field_converted(self, nautilus_field, graphene_field):
pass
def test_can_convert_BigIntegerField(self):
pass
def test_can_convert_BooleanField(self):
pass
def test_can_convert_CharField(self):
pass
def test_can_convert_DateField(self):
pass
def test_can_convert_DateTimeField(self):
pass
def test_can_convert_DecimalField(self):
pass
def test_can_convert_DoubleField(self):
pass
def test_can_convert_FixedCharField(self):
pass
def test_can_convert_FloatField(self):
pass
def test_can_convert_IntegerField(self):
pass
def test_can_convert_PrimaryKeyField(self):
pass
def test_can_convert_TextField(self):
pass
def test_can_convert_TimeField(self):
pass
def test_can_convert_UUIDField(self):
pass
| 16 | 0 | 2 | 0 | 2 | 0 | 1 | 0.06 | 1 | 0 | 0 | 0 | 15 | 0 | 15 | 87 | 52 | 16 | 34 | 17 | 18 | 2 | 32 | 17 | 16 | 1 | 2 | 0 | 15 |
3,691 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/config/test_config.py
|
tests.config.test_config.TestUtil
|
class TestUtil(unittest.TestCase):
def check_configuration(self, config, message="Wrong configuration."):
# make sure the configuration object looks like we expect
assert config == {'foo': 'bar'} , message
def test_can_read_keys_as_attribute(self):
# create a config object to test
config = Config(foo='bar')
# validate the config object
assert config.foo == 'bar', (
"Attribute could not be read"
)
def test_can_set_keys_as_attrbutes(self):
# create a config object to test
config = Config(foo='bar')
# update the attrbute
config.foo = 'quz'
# validate the config object
assert config['foo'] == 'quz', (
"Attributes could not be updated."
)
def test_can_accept_multiple_arguments(self):
# create a config object with two arguments
config = Config({'foo': 'bar'}, {'bar': 'baz'})
# make sure both applied
assert config['foo'] == 'bar' and config['bar'] == 'baz', (
"Config could not mix in multiple values."
)
def test_can_accept_kwds(self):
# create a config object to test
config = Config(foo='bar')
# validate the config object
self.check_configuration(config,
"Configuration object could not accept keywords."
)
def test_can_accept_dict(self):
# the configuration dictionary
config_dict = dict(foo='bar')
# create a config object out of the dictionary
config = Config(config_dict)
# validate the config object
self.check_configuration(config,
"Configuration object could not accept dictionaries."
)
def test_can_accept_type(self):
# the configuration type
class ConfigType:
foo = 'bar'
# add a function to the test too
def func(self): pass
# create the config object from the type
config = Config(ConfigType)
# validate the config object
self.check_configuration(config,
"Configuration object could not accept types."
)
def test_can_accept_config_object(self):
# create a config object
config1 = Config(foo='bar')
# create a config object out of that object
config2 = Config(config1)
# validate the config object
self.check_configuration(config2,
"Configuration object could not accept other config objects."
)
def test_can_update_with_another_config(self):
# create a config object
config1 = Config(foo='bar')
# create a config object out of that object
config2 = Config(bar='baz')
# merge the two configs
config1.update({'bar':'baz'})
# make sure one can be applied on the other
assert config1 == {'foo': 'bar', 'bar': 'baz'}, (
"Config could not be updated with another."
)
def test_can_accept_none(self):
# create a config with nothing
config = Config(None)
# make sure it created an empty config
assert config == {}, (
"Config(None) did not create an empty config."
)
|
class TestUtil(unittest.TestCase):
def check_configuration(self, config, message="Wrong configuration."):
pass
def test_can_read_keys_as_attribute(self):
pass
def test_can_set_keys_as_attrbutes(self):
pass
def test_can_accept_multiple_arguments(self):
pass
def test_can_accept_kwds(self):
pass
def test_can_accept_dict(self):
pass
def test_can_accept_type(self):
pass
class ConfigType:
def func(self):
pass
def test_can_accept_config_object(self):
pass
def test_can_update_with_another_config(self):
pass
def test_can_accept_none(self):
pass
| 13 | 0 | 8 | 0 | 5 | 2 | 1 | 0.46 | 1 | 2 | 1 | 0 | 10 | 0 | 10 | 82 | 102 | 20 | 56 | 26 | 44 | 26 | 39 | 26 | 26 | 1 | 2 | 0 | 11 |
3,692 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/auth/test_util.py
|
tests.auth.test_util.TestUtil
|
class TestUtil(unittest.TestCase):
"""
This test suite checks the behavior of the various mixins that come
with nautilus.
"""
def test_has_session_encryption_algorithm(self):
# just make sure we have a value
assert isinstance(token_encryption_algorithm(), str), (
"Could not retrieve session token encryption algorithm."
)
def test_read_write_session_token(self):
# the secret key to use
secret_key = 'asdf'
# generate a session token
session_token = generate_session_token(secret_key, user=1)
# make sure we got a string back
assert isinstance(session_token, str), (
"Generated session token was not a string."
)
# make sure we can read it back
assert read_session_token(secret_key, session_token) == {
'user': 1
}, (
"Read session token did not match expecatations."
)
try:
# make sure it would fail if we passed an invalid key
read_session_token(secret_key, session_token)
# if we got here then something went wrong
raise AssertionError("Invalid key was able to read session token")
except:
pass
|
class TestUtil(unittest.TestCase):
'''
This test suite checks the behavior of the various mixins that come
with nautilus.
'''
def test_has_session_encryption_algorithm(self):
pass
def test_read_write_session_token(self):
pass
| 3 | 1 | 15 | 1 | 10 | 4 | 2 | 0.52 | 1 | 2 | 0 | 0 | 2 | 0 | 2 | 74 | 37 | 5 | 21 | 5 | 18 | 11 | 13 | 5 | 10 | 2 | 2 | 1 | 3 |
3,693 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/nautilus/auth/models/userPassword.py
|
nautilus.auth.models.userPassword.UserPassword
|
class UserPassword(HasPassword, BaseModel):
user = fields.CharField(unique=True)
|
class UserPassword(HasPassword, BaseModel):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 | 0 | 2 | 2 | 1 | 1 | 2 | 2 | 1 | 0 | 1 | 0 | 0 |
3,694 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/auth/test_fields.py
|
tests.auth.test_fields.TestUtil
|
class TestUtil(unittest.TestCase):
"""
This test suite checks the behavior of the various mixins that come
with nautilus.
"""
def setUp(self):
# point the database to a in-memory sqlite database
nautilus.database.init_db('sqlite:///test.db')
def test_password_field(self):
# create a table with a password
class TestPassword(models.BaseModel):
password = nautilus.auth.PasswordField()
# create the table
TestPassword.create_table(True)
# create an instance of the table with a password
record = TestPassword(password="foo")
# save the record to the database
record.save()
# retireve the record
password = TestPassword.get(TestPassword.id == record.id).password
# make sure there is a hash assocaited with the password
assert hasattr(password, 'hash') , (
"Retrieved record's password did not come with a hash"
)
# make sure that hash hide the password
assert password.hash != 'foo' , (
"Retrieved record's password is in plain sight!"
)
# make sure we can check for password equality
assert password == 'foo', (
'Password could not checked for equality.'
)
# remove the table from the database
TestPassword.drop_table()
|
class TestUtil(unittest.TestCase):
'''
This test suite checks the behavior of the various mixins that come
with nautilus.
'''
def setUp(self):
pass
def test_password_field(self):
pass
class TestPassword(models.BaseModel):
| 4 | 1 | 17 | 2 | 10 | 5 | 1 | 0.7 | 1 | 1 | 1 | 0 | 2 | 0 | 2 | 74 | 41 | 7 | 20 | 7 | 16 | 14 | 14 | 7 | 10 | 1 | 2 | 0 | 2 |
3,695 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/api/test_util.py
|
tests.api.test_util.TestUtil
|
class TestUtil(unittest.TestCase):
def test_create_model_schema(self):
# create a graphql schema from the model
schema = create_model_schema(MockModel())
# the fields in the schema
schema_fields = schema.introspect()['__schema']['types'][0]['fields']
# make sure there is only one field
self.assertRaises(IndexError, lambda: schema_fields[1])
# the single field in the schema
field = schema_fields[0]
# make sure that field matches the convention
assert field['name'] == nautilus.conventions.api.root_query(), (
'The generated schema does not have a field named `all_models`'
)
# grab the arguments for the field
arg_names = {arg['name'] for arg in field['args']}
# make sure the test field is present
assert 'name' in arg_names, (
"The generated schema cannot be filterd for model fields."
)
def test_generate_api_schema(self):
# create mock summaries
model_summary = MockModelService()().summarize()
connection_summary = MockConnectionService()().summarize()
# create the graphql schema
schema = generate_api_schema([model_summary], [connection_summary])
# grab the object type corresponding to our mock
field = [field for field in schema.query._meta.local_fields if field.default_name == 'testModel']
# make sure such an object type exists
assert field, (
"No object type added to api schema"
)
def test_generate_api_schema_with_mutation(self):
model_service = MockModelService()()
# create mock summaries
model_summary = model_service.summarize()
mutation_summary = summarize_crud_mutation(model=model_service, method='create')
# create the graphql schema
schema = generate_api_schema(
models=[model_summary],
mutations=[mutation_summary]
)
# the list of mutations in the schema
schema_mutations = [field.default_name for field in schema.mutation._meta.local_fields]
# make sure the schema has the correct mutation list
assert schema_mutations == ['createTestModel'], (
"Generated schema did not have the correct mutations"
)
def test_graphql_type_from_summary(self):
# a mock model service summary
summary = MockModelService()().summarize()
# create the graphql type from the summary
graphql_type = graphql_type_from_summary(summary, [])
# grab a list of the fields of the generated type
fields = {field.default_name for field in graphql_type._meta.local_fields}
# make sure they are what we expect
assert fields == {'id', 'name', 'date'} , (
"Generated graphql type does not have the correct fields"
)
def test_graphql_type_from_summary_with_connections(self):
# mock summaries
summary = MockModelService()().summarize()
connection_summary = MockConnectionService()().summarize()
# create the graphql type from the summary
graphql_type = graphql_type_from_summary(summary, [connection_summary])
# grab a list of the fields of the generated type
fields = {field.default_name for field in graphql_type._meta.local_fields}
# make sure they are what we expect
assert fields == {'id', 'name', 'date', 'testConnection'} , (
"Generated graphql type with connection does not have the correct fields"
)
def test_graphql_mutation_from_summary(self):
# create a mock mutation summary
mock_summary = summarize_crud_mutation(model=MockModelService(), method="delete")
# create the mutation
mutation = graphql_mutation_from_summary(mock_summary)
# create a schema to test the mutation
mock_schema = graphene.Schema()
# get the corresponding object type
mutation_object = mock_schema.T(mutation)
mutation_fields = list(mutation_object.get_fields().keys())
# there should be one field named status
assert mutation_fields == ['status'], (
"Delete mutation did not have correct output"
)
def test_graphql_mutation_with_object_io_from_summary(self):
# create a mock mutation summary with a io that's an object
mock_summary = summarize_crud_mutation(model=MockModelService(), method="create")
# create the mutation
mutation = graphql_mutation_from_summary(mock_summary)
# create a schema to test the mutation
mock_schema = graphene.Schema()
# get the corresponding object type
mutation_object = mock_schema.T(mutation)
# make sure there is a resulting 'testModel' in the mutation
assert 'testModel' in mutation_object.get_fields(), (
"Generated create mutation from summary does not have a service record in its output."
)
# the fields of the mutation result
output_fields = set(mutation_object.get_fields()['testModel'].type.get_fields().keys())
# make sure the object has the right types
assert output_fields == {'date', 'name', 'id'}, (
"Mutation output did not have the correct fields."
)
def test_walk_query(self):
"""
This function is implicitly tested when checking parse_string
"""
@async_test
async def test_parse_mutation_string(self):
# the query to parse
query = """
mutation {
myMutation {
name
}
}
"""
# the resolver for models
async def model_resolver(object_name, fields, **filters):
return {field: 'hello' for field in fields}
async def connection_resolver(connection_name, object):
return 'hello'
async def mutation_resolver(mutation_name, args, fields):
return {
field: mutation_name for field in fields
}
# parse the string with the query
result = await parse_string(query, model_resolver, connection_resolver, mutation_resolver)
# make sure the value is correct
assert result == {
'errors': [],
'mutation': {'myMutation': {'name': 'myMutation'}},
}, (
"Could not parse mutation string correctly."
)
@async_test
async def test_parse_string(self):
# the query to parse
query = """
query {
model {
name
}
}
"""
# the resolver for models
async def model_resolver(object_name, fields, **filters):
return {field: 'hello' for field in fields}
async def connection_resolver(connection_name, object):
return 'hello'
async def mutation_resolver(mutation_name, args, fields):
return 'hello'
# parse the string with the query
result = await parse_string(query, model_resolver, connection_resolver, mutation_resolver)
# make sure the value is correct
assert result == {
'errors': [],
'data': {'model': {'name': 'hello'}},
}, (
"Could not parse string correctly."
)
def test_fields_for_model(self):
# a mock to test with
model = MockModel()
# create the dictionary of fields for the model
fields = fields_for_model(model)
assert 'name' in fields and 'date' in fields and 'id' in fields , (
"Could not create correct fields for model"
)
assert isinstance(fields['date'], graphene.String) , (
"Field summary did not have the correct type"
)
def test_can_summarize_mutation(self):
# summarize a mock mutation
summarized = summarize_mutation(
mutation_name='test_mutation',
event='foo.bar',
inputs=['foo','bar'],
outputs=['bar','baz']
)
# check that it matches what we expect
expected = {
'name': 'test_mutation',
'event': 'foo.bar',
'isAsync': False,
'inputs': ['foo','bar'],
'outputs': ['bar','baz']
}
# make sure the two match
assert summarized == expected, (
"Summarized mutation did not match expectation."
)
def test_can_summarize_async_mutation(self):
# summarize a mock mutation
summarized = summarize_mutation(
'test_mutation',
'foo.bar',
isAsync=True,
inputs=['foo','bar'],
outputs=['bar','baz']
)
# check that it matches what we expect
expected = {
'name': 'test_mutation',
'event': 'foo.bar',
'isAsync': True,
'inputs':['foo','bar'],
'outputs': ['bar','baz']
}
# make sure the two match
assert summarized == expected, (
"Summarized async mutation did not match expectation. Found {}, expected {}"\
.format(summarized, expected)
)
def test_mutation_io_summary(self):
# make sure we can summary a mutation io
summarized = summarize_mutation_io(name="foo", type="bar")
# make sure its a string
assert summarized == {
"name": "foo",
"type": "bar",
"required": False
}, (
"Summarized mutation io did not have the correct form."
)
def test_mutation_required_io_summary(self):
# make sure we can summary a mutation io
summarized = summarize_mutation_io(name="foo", type="bar", required=True)
# make sure its a string
assert summarized == {
"name": "foo",
"type": "bar",
"required": True
}, (
"Required summarized mutation io did not have the correct form."
)
def test_can_summarize_crud_mutation(self):
# a model service to test with
mock = MockModelService()
# make sure we can generate a create mutation
self._verify_crud_mutation(model=mock, action='create')
def test_can_summarize_crud_mutation(self):
# a model service to test with
mock = MockModelService()
# make sure we can generate a delete mutation
self._verify_crud_mutation(model=mock, action='delete')
def test_can_summarize_crud_mutation(self):
# a model service to test with
mock = MockModelService()
# make sure we can generate a update mutation
self._verify_crud_mutation(model=mock, action='update')
def test_convert_typestring_to_api_native(self):
# make sure it converts String to the correct class
assert convert_typestring_to_api_native('String') == graphene.String, (
"Could not convert String to native representation."
)
def test_serialize_native_type(self):
# make sure it converts a native string to 'String'
import nautilus.models.fields as fields
assert serialize_native_type(fields.CharField()) == 'String', (
"Could not serialize native type."
)
def test_build_native_type_dictionary(self): pass
def test_query_for_model_with_filters(self):
# create a query with filters to test
query = query_for_model(['hello'], world=1)
# make sure it matches expectations
assert query == "query { all_models(world: 1) { hello } }", (
"Could not generate query for model with filters."
)
def test_query_for_model_without_filters(self):
# create a query with filters to test
query = query_for_model(['hello'])
# make sure it matches expectations
assert query == "query { all_models { hello } }", (
"Could not generate query for model without filters."
)
def test_graph_entity_needs_to_start_somewhere(self):
# make sure an exception is raised
try:
# try to make an empty one
GraphEntity(service=Mock())
# if we got here then we failed
raise AssertionError("GraphEntity did not require a starting point.")
# if an exception is raised
except ValueError:
# then we pass
pass
def test_graph_entity_maintains_source(self):
# create a graph entity to test
entity = GraphEntity(service=Mock(), model_type='user', id=1)
# check that the source values match up
assert entity._api_path == [{"name": "user", "args": {"id": 1}}], (
"The source node of the graph entity did not match constructor arguments."
)
def test_graph_entity_can_build_path(self):
# create a graph entity to test
entity = GraphEntity(service=Mock(), model_type='user', id=1)
# build a path to test
assert entity.foo.bar._api_path == [
{"name": "user", "args": {"id": 1}},
{"name": "foo", "args": {}},
{"name": "bar", "args": {}}
], "Internal api without args path did not match expectation."
def test_graph_entity_can_build_path_with_args(self):
# create a graph entity to test
entity = GraphEntity(service=Mock(), model_type='user', id=1)
# build a path to test
assert entity.foo(bar="baz").bar._api_path == [
{"name": "user", "args": {"id": 1}},
{"name": "foo", "args": {"bar": "baz"}},
{"name": "bar", "args": {}}
], "Internal api with args path did not match expectation."
def test_graph_entity_query(self):
# the graph entity to test against
entity = GraphEntity(service=Mock(), model_type="user", id=1).foo.bar(arg="2")
# parse the associated query
parsed = graphql.parse(entity._query)
# the target query
target = """
query {
user(id:1) {
foo {
bar(arg:2) {
id
}
}
}
}
"""
# make sure there is a single root query definted
assert len(parsed.definitions) == 1 and parsed.definitions[0].operation == "query", (
"Graph entity parsed query did not have a single definition."
)
top_selection = parsed.definitions[0].selection_set.selections
# make sure there is a single selection with the right name
assert len(top_selection) == 1 and top_selection[0].name.value == 'user', (
"Top selection does not have the right name."
)
# pull out the first and only selection
top_selection = top_selection[0]
top_args = top_selection.arguments
# verify the arguments of the top selection
assert len(top_args) == 1 and top_args[0].name.value == 'id' and top_args[0].value.value == '1', (
"Top selection did not have the right arguments."
)
# the first level deep selection
second_selection_set = top_selection.selection_set.selections
# make sure there is only one and it has no arguments
assert len(second_selection_set) == 1 and second_selection_set[0].name.value == 'foo' \
and len(second_selection_set[0].arguments) == 0, (
"Second selection did not have the right characteristics."
)
second_selection = second_selection_set[0]
# the third level of the selection
third_selection_set = second_selection.selection_set.selections
# make sure the third level has the correct name and arguments
assert len(third_selection_set) == 1 and third_selection_set[0].name.value == 'bar' \
and len(third_selection_set[0].arguments) == 1 \
and third_selection_set[0].arguments[0].name.value == 'arg' \
and third_selection_set[0].arguments[0].value.value == '2', (
"Third selection did not have the right requirements."
)
third_selection = third_selection_set[0]
fourth_selection_set = third_selection.selection_set.selections
# make sure we are asking for the id of the final select
assert len(fourth_selection_set) == 1 and fourth_selection_set[0].name.value == 'id', (
"Final selection was incorrect."
)
def test_graph_entity__find_id(self):
# a graph entity to test with
entity = GraphEntity(service=Mock(), model_type="user", id=1)
# the result to test against
result = {
'user': {
'foo': [
{'id': 1}
],
'bar': {
'id': 7,
'baz': [
{'id': 5}
]
},
'baz': {
'id': 8,
'bing': []
}
}
}
# make sure it can find the number 1 in the list
assert entity._find_id(result, 1), (
"Could not find id in GraphEntity result."
)
# make sure it can find the number 1 in the list
assert entity._find_id(result, 5), (
"Could not find id in GraphEntity result."
)
# make sure we don't have any false positives
assert not entity._find_id(result, 7), (
"Encountered false positive in GraphEntity._find_id."
)
# make sure we don't have any false positives
assert not entity._find_id(result, 8), (
"Encountered a complicated false positive in GraphEntity._find_id."
)
## Utilities
def _verify_crud_mutation(self, model, action):
# create the mutation
summarized = summarize_crud_mutation(model=model, method=action)
# make sure the name matches the convention
assert summarized['name'] == crud_mutation_name(model=model, action=action), (
"Summarized %s mutation did not have the right name." % action
)
# make sure the event is what we expect
assert summarized['event'] == get_crud_action(model=model, method=action), (
"Summarized %s mutation did not have the right event type." % action
)
|
class TestUtil(unittest.TestCase):
def test_create_model_schema(self):
pass
def test_generate_api_schema(self):
pass
def test_generate_api_schema_with_mutation(self):
pass
def test_graphql_type_from_summary(self):
pass
def test_graphql_type_from_summary_with_connections(self):
pass
def test_graphql_mutation_from_summary(self):
pass
def test_graphql_mutation_with_object_io_from_summary(self):
pass
def test_walk_query(self):
'''
This function is implicitly tested when checking parse_string
'''
pass
@async_test
async def test_parse_mutation_string(self):
pass
async def model_resolver(object_name, fields, **filters):
pass
async def connection_resolver(connection_name, object):
pass
async def mutation_resolver(mutation_name, args, fields):
pass
@async_test
async def test_parse_string(self):
pass
async def model_resolver(object_name, fields, **filters):
pass
async def connection_resolver(connection_name, object):
pass
async def mutation_resolver(mutation_name, args, fields):
pass
def test_fields_for_model(self):
pass
def test_can_summarize_mutation(self):
pass
def test_can_summarize_async_mutation(self):
pass
def test_mutation_io_summary(self):
pass
def test_mutation_required_io_summary(self):
pass
def test_can_summarize_crud_mutation(self):
pass
def test_can_summarize_crud_mutation(self):
pass
def test_can_summarize_crud_mutation(self):
pass
def test_convert_typestring_to_api_native(self):
pass
def test_serialize_native_type(self):
pass
def test_build_native_type_dictionary(self):
pass
def test_query_for_model_with_filters(self):
pass
def test_query_for_model_without_filters(self):
pass
def test_graph_entity_needs_to_start_somewhere(self):
pass
def test_graph_entity_maintains_source(self):
pass
def test_graph_entity_can_build_path(self):
pass
def test_graph_entity_can_build_path_with_args(self):
pass
def test_graph_entity_query(self):
pass
def test_graph_entity__find_id(self):
pass
def _verify_crud_mutation(self, model, action):
pass
| 39 | 1 | 13 | 1 | 9 | 3 | 1 | 0.33 | 1 | 7 | 2 | 0 | 30 | 0 | 30 | 102 | 514 | 95 | 316 | 103 | 277 | 103 | 154 | 101 | 116 | 2 | 2 | 1 | 37 |
3,696 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/api/test_schema.py
|
tests.api.test_schema.TestUtil
|
class TestUtil(unittest.TestSuite):
def setUp(self):
# create a nautilus schema to test
self.schema = nautilus.api.Schema()
# create an ioloop to use
self.io_loop = self.get_new_ioloop()
def test_does_not_auto_camel_case(self):
# a query to test with a snake case field
class TestQuery(ObjectType):
test_field = String()
def resolve_test_field(self, args, info):
return 'hello'
# assign the query to the schema
self.schema.query = TestQuery
# the query to test
test_query = "query {test_field}"
# execute the query
resolved_query = self.schema.execute(test_query)
assert 'test_field' in resolved_query.data, (
"Schema did not have snake_case field."
)
assert resolved_query.data['test_field'] == 'hello', (
"Snake_case field did not have the right value"
)
|
class TestUtil(unittest.TestSuite):
def setUp(self):
pass
def test_does_not_auto_camel_case(self):
pass
class TestQuery(ObjectType):
def resolve_test_field(self, args, info):
pass
| 5 | 0 | 11 | 2 | 6 | 2 | 1 | 0.33 | 1 | 1 | 1 | 0 | 2 | 2 | 2 | 22 | 33 | 9 | 18 | 10 | 13 | 6 | 14 | 10 | 9 | 1 | 3 | 0 | 3 |
3,697 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/api/test_filter.py
|
tests.api.test_filter.TestUtil
|
class TestUtil(unittest.TestCase):
def setUp(self):
# point the database to a in-memory sqlite database
nautilus.database.init_db('sqlite:///test.db')
# save a reference to the test model
self.model = MockModel()
# generate the arguments for the model
self.args = args_for_model(self.model)
# create a set out of the arguments
self.arg_names = set(self.args.keys())
# create a database table to test on
self.model.create_table()
# generate test data
self._gen_testdata()
def tearDown(self):
# remove the test table
self.model.drop_table()
def test_args_match_model(self):
# make sure the argument contain the model fields
assert self.arg_names >= {field.name for field in self.model.fields()}, (
"Generated args do not contain model fields"
)
def test_pk_filter(self): pass
def test_pk_filter_with_custom_pk(self): pass
def test_pk_in_filter(self): pass
def test_args_has_oneof_filter(self):
# the filters we would exepect for the contains arg
contains_filter_args = {'name_in', 'date_in'}
# make sure the arguments exist for the contains filter
assert self.arg_names >= contains_filter_args, (
"Generated args do not have contains filter."
)
def test_can_filter_by_field(self):
# the argument to filter for
filter_args = dict(name='foo1')
# filter the models
records_filtered = filter_model(self.model, filter_args)
# make sure only one record was returned
assert len(records_filtered) == 1, (
"More than one record was returned by filter."
)
# pull out the retrieved record
retrieved_record_name = records_filtered[0].name
# make sure the first name matches
expected = 'foo1'
assert retrieved_record_name == expected, (
"Got %(retrieved_record_name)s instead of %(expected)s" % locals()
)
def test_can_filter_by_contains(self):
# the argument to filter for
filter_args = dict(name_in=['foo1', 'foo2'])
# filter the models
records_filtered = filter_model(self.model, filter_args)
# make sure only one record was returned
assert len(records_filtered) == 2, (
"More than one record was returned by filter."
)
# figure out the names of the records we retrieved
retrieved_names = {record.name for record in records_filtered}
# make sure the first name matches
expected = {'foo1', 'foo2'}
assert retrieved_names == expected, (
"Got %(retrieved_names)s instead of %(expected)s" % locals()
)
def test_can_handle_first(self):
# the argument to filter for
filter_args = dict(first=2, offset=0)
# filter the models
records_filtered = filter_model(self.model, filter_args)
# figure out the names of the records we retrieved
retrieved_names = [record.name for record in records_filtered]
expected = ['foo1', 'bar1']
assert retrieved_names == expected, (
"Got %(retrieved_names)s instead of %(expected)s" % locals()
)
def test_can_handle_last(self):
# the argument to filter for
filter_args = dict(last=2, offset=0)
# filter the models
records_filtered = filter_model(self.model, filter_args)
# figure out the names of the records we retrieved
retrieved_names = [record.name for record in records_filtered]
expected = ['bar10', 'foo10']
assert retrieved_names == expected, (
"Got %(retrieved_names)s instead of %(expected)s" % locals()
)
def test_can_handle_last_offset(self):
# the argument to filter for
filter_args = dict(last=2, offset=2)
# filter the models
records_filtered = filter_model(self.model, filter_args)
# figure out the names of the records we retrieved
retrieved_names = [record.name for record in records_filtered]
expected = ['bar9', 'foo9']
assert retrieved_names == expected, (
"Got %(retrieved_names)s instead of %(expected)s" % locals()
)
def test_can_handle_first_offset(self):
# the argument to filter for
filter_args = dict(first=4, offset=2)
# filter the models
records_filtered = filter_model(self.model, filter_args)
# figure out the names of the records we retrieved
retrieved_names = [record.name for record in records_filtered]
expected = ['foo2', 'bar2', 'foo3', 'bar3']
assert retrieved_names == expected, (
"Got %(retrieved_names)s instead of %(expected)s" % locals()
)
def test_can_handle_first_offset_order_by(self):
# the argument to filter for
filter_args = dict(first=4, offset=2, order_by=["date", "-name"])
# filter the models
records_filtered = filter_model(self.model, filter_args)
# figure out the names of the records we retrieved
retrieved_names = [record.name for record in records_filtered]
expected = ['foo7', 'foo6', 'foo5', 'foo4']
assert retrieved_names == expected, (
"Got %(retrieved_names)s instead of %(expected)s" % locals()
)
def _gen_testdata(self):
# some test records
for i in range(10):
self.model(name='foo%s' % (i+1), date='bar').save()
self.model(name='bar%s' % (i+1), date='foo').save()
|
class TestUtil(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_args_match_model(self):
pass
def test_pk_filter(self):
pass
def test_pk_filter_with_custom_pk(self):
pass
def test_pk_in_filter(self):
pass
def test_args_has_oneof_filter(self):
pass
def test_can_filter_by_field(self):
pass
def test_can_filter_by_contains(self):
pass
def test_can_handle_first(self):
pass
def test_can_handle_last(self):
pass
def test_can_handle_last_offset(self):
pass
def test_can_handle_first_offset(self):
pass
def test_can_handle_first_offset_order_by(self):
pass
def _gen_testdata(self):
pass
| 16 | 0 | 9 | 1 | 6 | 2 | 1 | 0.41 | 1 | 3 | 0 | 0 | 15 | 3 | 15 | 87 | 166 | 42 | 88 | 49 | 75 | 36 | 69 | 49 | 53 | 2 | 2 | 1 | 16 |
3,698 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/tests/util/tests/test_mock.py
|
test_mock.TestUtil
|
class TestUtil(unittest.TestCase):
def setUp(self):
# create a mock
self.mock = Mock()
def test_must_be_called_more_than_once(self):
try:
# check that the mock has been called
self.mock.assert_called()
# it throws an assertion error
except AssertionError:
pass
def test_default_fails_multiple_calls(self):
# call the mock twice
self.mock()
self.mock()
# expect this check to fail
try:
# check that the mock has been called
self.mock.assert_called()
# it throws an assertion error
except AssertionError:
pass
def test_can_check_for_args(self):
# pass some args to the mock
self.mock('bar', 'baz')
# verify that the mock was called with the args
self.mock.assert_called('bar', 'baz')
def test_can_check_for_kwds(self):
# pass some kwds to the mock
self.mock(foo='bar')
# verify that th emock was called with the args
self.mock.assert_called(foo='bar')
|
class TestUtil(unittest.TestCase):
def setUp(self):
pass
def test_must_be_called_more_than_once(self):
pass
def test_default_fails_multiple_calls(self):
pass
def test_can_check_for_args(self):
pass
def test_can_check_for_kwds(self):
pass
| 6 | 0 | 6 | 0 | 4 | 2 | 1 | 0.52 | 1 | 2 | 1 | 0 | 5 | 1 | 5 | 77 | 41 | 9 | 21 | 7 | 15 | 11 | 21 | 7 | 15 | 2 | 2 | 1 | 7 |
3,699 |
AlecAivazis/graphql-over-kafka
|
AlecAivazis_graphql-over-kafka/example/recipes.py
|
recipes.RecipeService
|
class RecipeService(ModelService):
model = Recipe
config = ServiceConfig
|
class RecipeService(ModelService):
pass
| 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0 | 3 | 3 | 2 | 0 | 3 | 3 | 2 | 0 | 1 | 0 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.