qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/interval.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

from aniso8601.builders import TupleBuilder
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.date import parse_date
from aniso8601.duration import parse_duration
from aniso8601.exceptions import ISOFormatError
from aniso8601.time import parse_datetime

def parse_interval(isointervalstr, intervaldelimiter='/',
                   datetimedelimiter='T', builder=PythonTimeBuilder):
    #Given a string representing an ISO 8601 interval, return an
    #interval built by the given builder. Valid formats are:
    #
    #<start>/<end>
    #<start>/<duration>
    #<duration>/<end>
    #
    #The <start> and <end> values can represent dates, or datetimes,
    #not times.
    #
    #The format:
    #
    #<duration>
    #
    #is expressly not supported as there is no way to provide the additional
    #required context.
    if isointervalstr[0] == 'R':
        raise ISOFormatError('ISO 8601 repeating intervals must be parsed '
                             'with parse_repeating_interval.')

    return _parse_interval(isointervalstr, builder,
                           intervaldelimiter, datetimedelimiter)
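
#A minimal doctest-style usage sketch; the input strings are illustrative,
#and the results shown assume the default PythonTimeBuilder:
#
#>>> parse_interval('2007-03-01T13:00:00/2008-05-11T15:30:00')
#(datetime.datetime(2007, 3, 1, 13, 0), datetime.datetime(2008, 5, 11, 15, 30))
#>>> parse_interval('2007-03-01/P1M')
#(datetime.date(2007, 3, 1), datetime.date(2007, 3, 31))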
def parse_repeating_interval(isointervalstr, intervaldelimiter='/',
                             datetimedelimiter='T',
                             builder=PythonTimeBuilder):
    #Given a string representing an ISO 8601 repeating interval, return an
    #interval built by the given builder. Valid formats are:
    #
    #Rnn/<interval>
    #R/<interval>
    if isointervalstr[0] != 'R':
        raise ISOFormatError('ISO 8601 repeating interval must start '
                             'with an R.')

    #Parse the number of iterations
    iterationpart, intervalpart = isointervalstr.split(intervaldelimiter, 1)

    if len(iterationpart) > 1:
        R = False
        Rnn = iterationpart[1:]
    else:
        R = True
        Rnn = None

    interval = _parse_interval(intervalpart, TupleBuilder,
                               intervaldelimiter, datetimedelimiter)

    return builder.build_repeating_interval(R=R, Rnn=Rnn, interval=interval)
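
#Illustrative sketch (the default PythonTimeBuilder returns a generator):
#
#>>> list(parse_repeating_interval('R3/1981-04-05/P1D'))
#[datetime.date(1981, 4, 5), datetime.date(1981, 4, 6), datetime.date(1981, 4, 7)]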
def _parse_interval(isointervalstr, builder, intervaldelimiter='/',
                    datetimedelimiter='T'):
    #Returns a tuple containing the start of the interval, the end of the
    #interval, and/or the interval duration

    firstpart, secondpart = isointervalstr.split(intervaldelimiter)

    if firstpart[0] == 'P':
        #<duration>/<end>
        #Notice that these are not returned 'in order' (earlier to later), this
        #is to maintain consistency with parsing <start>/<end> durations, as
        #well as making repeating interval code cleaner. Users who desire
        #durations to be in order can use the 'sorted' builtin.

        #We need to figure out if <end> is a date, or a datetime
        if secondpart.find(datetimedelimiter) != -1:
            #<end> is a datetime
            duration = parse_duration(firstpart, builder=TupleBuilder)
            enddatetime = parse_datetime(secondpart,
                                         delimiter=datetimedelimiter,
                                         builder=TupleBuilder)

            return builder.build_interval(end=enddatetime,
                                          duration=duration)

        #<end> must just be a date
        duration = parse_duration(firstpart, builder=TupleBuilder)
        enddate = parse_date(secondpart, builder=TupleBuilder)

        return builder.build_interval(end=enddate, duration=duration)
    elif secondpart[0] == 'P':
        #<start>/<duration>
        #We need to figure out if <start> is a date, or a datetime
        if firstpart.find(datetimedelimiter) != -1:
            #<start> is a datetime
            duration = parse_duration(secondpart, builder=TupleBuilder)
            startdatetime = parse_datetime(firstpart,
                                           delimiter=datetimedelimiter,
                                           builder=TupleBuilder)

            return builder.build_interval(start=startdatetime,
                                          duration=duration)

        #<start> must just be a date
        duration = parse_duration(secondpart, builder=TupleBuilder)
        startdate = parse_date(firstpart, builder=TupleBuilder)

        return builder.build_interval(start=startdate,
                                      duration=duration)

    #<start>/<end>
    if (firstpart.find(datetimedelimiter) != -1
            and secondpart.find(datetimedelimiter) != -1):
        #Both parts are datetimes
        start_datetime = parse_datetime(firstpart,
                                        delimiter=datetimedelimiter,
                                        builder=TupleBuilder)
        end_datetime = parse_datetime(secondpart,
                                      delimiter=datetimedelimiter,
                                      builder=TupleBuilder)

        return builder.build_interval(start=start_datetime,
                                      end=end_datetime)
    elif (firstpart.find(datetimedelimiter) != -1
            and secondpart.find(datetimedelimiter) == -1):
        #First part is a datetime, second part is a date
        start_datetime = parse_datetime(firstpart,
                                        delimiter=datetimedelimiter,
                                        builder=TupleBuilder)
        end_date = parse_date(secondpart, builder=TupleBuilder)

        return builder.build_interval(start=start_datetime,
                                      end=end_date)
    elif (firstpart.find(datetimedelimiter) == -1
            and secondpart.find(datetimedelimiter) != -1):
        #First part is a date, second part is a datetime
        start_date = parse_date(firstpart, builder=TupleBuilder)
        end_datetime = parse_datetime(secondpart,
                                      delimiter=datetimedelimiter,
                                      builder=TupleBuilder)

        return builder.build_interval(start=start_date,
                                      end=end_datetime)

    #Both parts are dates
    start_date = parse_date(firstpart, builder=TupleBuilder)
    end_date = parse_date(secondpart, builder=TupleBuilder)

    return builder.build_interval(start=start_date, end=end_date)
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/duration.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

from aniso8601 import compat
from aniso8601.builders import TupleBuilder
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.date import parse_date
from aniso8601.decimalfraction import find_separator, normalize
from aniso8601.exceptions import ISOFormatError, NegativeDurationError
from aniso8601.time import parse_time

def parse_duration(isodurationstr, builder=PythonTimeBuilder):
    #Given a string representing an ISO 8601 duration, return a
    #duration built by the given builder. Valid formats are:
    #
    #PnYnMnDTnHnMnS (or any reduced precision equivalent)
    #P<date>T<time>
    if isodurationstr[0] != 'P':
        raise ISOFormatError('ISO 8601 duration must start with a P.')

    #If Y, M, D, H, S, or W are in the string,
    #assume it is a specified duration
    if _has_any_component(isodurationstr,
                          ['Y', 'M', 'D', 'H', 'S', 'W']) is True:
        return _parse_duration_prescribed(isodurationstr, builder)

    return _parse_duration_combined(isodurationstr, builder)
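
#Illustrative sketch (default PythonTimeBuilder; calendar years and months
#use the fixed 365 and 30 day approximations applied by build_duration;
#Python 3 reprs shown):
#
#>>> parse_duration('P1Y2M3DT4H54M6S')
#datetime.timedelta(days=428, seconds=17646)
#>>> parse_duration('P7W')
#datetime.timedelta(days=49)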
def _parse_duration_prescribed(durationstr, builder):
    #durationstr can be of the form PnYnMnDTnHnMnS or PnW

    #Don't allow negative elements
    #https://bitbucket.org/nielsenb/aniso8601/issues/20/negative-duration
    if durationstr.find('-') != -1:
        raise NegativeDurationError('ISO 8601 durations must be positive.')

    #Make sure the end character is valid
    #https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
    if durationstr[-1] not in ['Y', 'M', 'D', 'H', 'S', 'W']:
        raise ISOFormatError('ISO 8601 duration must end with a valid '
                             'character.')

    #Make sure only the lowest order element has decimal precision
    separator_index = find_separator(durationstr)

    if separator_index != -1:
        remaining = durationstr[separator_index + 1:]

        if find_separator(remaining) != -1:
            raise ISOFormatError('ISO 8601 allows only lowest order element to '
                                 'have a decimal fraction.')

        #There should only ever be 1 letter after a decimal; if there is
        #more than one, the string is invalid
        lettercount = 0

        for character in remaining:
            if character.isalpha() is True:
                lettercount += 1

                if lettercount > 1:
                    raise ISOFormatError('ISO 8601 duration must end with '
                                         'a single valid character.')

    #Do not allow W in combination with other designators
    #https://bitbucket.org/nielsenb/aniso8601/issues/2/week-designators-should-not-be-combinable
    if (durationstr.find('W') != -1
            and _has_any_component(durationstr,
                                   ['Y', 'M', 'D', 'H', 'S']) is True):
        raise ISOFormatError('ISO 8601 week designators may not be combined '
                             'with other time designators.')

    #Parse the elements of the duration
    if durationstr.find('T') == -1:
        return _parse_duration_prescribed_notime(durationstr, builder)

    return _parse_duration_prescribed_time(durationstr, builder)
def _parse_duration_prescribed_notime(durationstr, builder):
    #durationstr can be of the form PnYnMnD or PnW

    #Don't allow negative elements
    #https://bitbucket.org/nielsenb/aniso8601/issues/20/negative-duration
    if durationstr.find('-') != -1:
        raise NegativeDurationError('ISO 8601 durations must be positive.')

    #Make sure no time portion is included ('M' is ambiguous between month
    #and minute, so only 'H' and 'S' can be checked here)
    #https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
    if _has_any_component(durationstr, ['H', 'S']):
        raise ISOFormatError('ISO 8601 time components not allowed in duration '
                             'without prescribed time.')

    if _component_order_correct(durationstr,
                                ['P', 'Y', 'M', 'D', 'W']) is False:
        raise ISOFormatError('ISO 8601 duration components must be in the '
                             'correct order.')

    if durationstr.find('Y') != -1:
        yearstr = _parse_duration_element(durationstr, 'Y')
    else:
        yearstr = None

    if durationstr.find('M') != -1:
        monthstr = _parse_duration_element(durationstr, 'M')
    else:
        monthstr = None

    if durationstr.find('W') != -1:
        weekstr = _parse_duration_element(durationstr, 'W')
    else:
        weekstr = None

    if durationstr.find('D') != -1:
        daystr = _parse_duration_element(durationstr, 'D')
    else:
        daystr = None

    return builder.build_duration(PnY=yearstr, PnM=monthstr,
                                  PnW=weekstr, PnD=daystr)
def _parse_duration_prescribed_time(durationstr, builder):
    #durationstr can be of the form PnYnMnDTnHnMnS

    #Don't allow negative elements
    #https://bitbucket.org/nielsenb/aniso8601/issues/20/negative-duration
    if durationstr.find('-') != -1:
        raise NegativeDurationError('ISO 8601 durations must be positive.')

    firsthalf = durationstr[:durationstr.find('T')]
    secondhalf = durationstr[durationstr.find('T'):]

    #Make sure no time portion is included in the date half
    #https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
    if _has_any_component(firsthalf, ['H', 'S']):
        raise ISOFormatError('ISO 8601 time components not allowed in date '
                             'portion of duration.')

    if _component_order_correct(firsthalf, ['P', 'Y', 'M', 'D', 'W']) is False:
        raise ISOFormatError('ISO 8601 duration components must be in the '
                             'correct order.')

    #Make sure no date component is included in the time half
    if _has_any_component(secondhalf, ['Y', 'D']):
        raise ISOFormatError('ISO 8601 date components not allowed in time '
                             'portion of duration.')

    if _component_order_correct(secondhalf, ['T', 'H', 'M', 'S']) is False:
        raise ISOFormatError('ISO 8601 time components in duration must be in '
                             'the correct order.')

    if firsthalf.find('Y') != -1:
        yearstr = _parse_duration_element(firsthalf, 'Y')
    else:
        yearstr = None

    if firsthalf.find('M') != -1:
        monthstr = _parse_duration_element(firsthalf, 'M')
    else:
        monthstr = None

    if firsthalf.find('D') != -1:
        daystr = _parse_duration_element(firsthalf, 'D')
    else:
        daystr = None

    if secondhalf.find('H') != -1:
        hourstr = _parse_duration_element(secondhalf, 'H')
    else:
        hourstr = None

    if secondhalf.find('M') != -1:
        minutestr = _parse_duration_element(secondhalf, 'M')
    else:
        minutestr = None

    if secondhalf.find('S') != -1:
        secondstr = _parse_duration_element(secondhalf, 'S')
    else:
        secondstr = None

    return builder.build_duration(PnY=yearstr, PnM=monthstr, PnD=daystr,
                                  TnH=hourstr, TnM=minutestr, TnS=secondstr)
def _parse_duration_combined(durationstr, builder):
    #Period of the form P<date>T<time>

    #Split the string into its component parts
    datepart, timepart = durationstr[1:].split('T') #We skip the 'P'

    datevalue = parse_date(datepart, builder=TupleBuilder)
    timevalue = parse_time(timepart, builder=TupleBuilder)

    return builder.build_duration(PnY=datevalue[0], PnM=datevalue[1],
                                  PnD=datevalue[2], TnH=timevalue[0],
                                  TnM=timevalue[1], TnS=timevalue[2])
def _parse_duration_element(durationstr, elementstr):
    #Extracts the specified portion of a duration, for instance, given:
    #durationstr = 'T4H5M6.1234S'
    #elementstr = 'H'
    #
    #returns 4
    #
    #Note that the string must start with a character, so it's assumed the
    #full duration string would be split at the 'T'
    durationstartindex = 0
    durationendindex = durationstr.find(elementstr)

    for characterindex in compat.range(durationendindex - 1, 0, -1):
        if durationstr[characterindex].isalpha() is True:
            durationstartindex = characterindex
            break

    durationstartindex += 1

    return normalize(durationstr[durationstartindex:durationendindex])
def _has_any_component(durationstr, components):
    #Given a duration string, and a list of components, returns True
    #if any of the listed components are present, False otherwise.
    #
    #For instance:
    #durationstr = 'P1Y'
    #components = ['Y', 'M']
    #
    #returns True
    #
    #durationstr = 'P1Y'
    #components = ['M', 'D']
    #
    #returns False
    for component in components:
        if durationstr.find(component) != -1:
            return True

    return False
def _component_order_correct(durationstr, componentorder):
    #Given a duration string, and a list of components, returns
    #True if the components are in the same order as the
    #component order list, False otherwise. Characters that
    #are present in the component order list but not in the
    #duration string are ignored.
    #
    #https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
    #
    #durationstr = 'P1Y1M1D'
    #components = ['P', 'Y', 'M', 'D']
    #
    #returns True
    #
    #durationstr = 'P1Y1M'
    #components = ['P', 'Y', 'M', 'D']
    #
    #returns True
    #
    #durationstr = 'P1D1Y1M'
    #components = ['P', 'Y', 'M', 'D']
    #
    #returns False
    componentindex = 0

    for characterindex in compat.range(len(durationstr)):
        character = durationstr[characterindex]

        if character in componentorder:
            #This is a character we need to check the order of
            if character in componentorder[componentindex:]:
                componentindex = componentorder.index(character)
            else:
                #A character is out of order
                return False

    return True
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/__init__.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
#Import the main parsing functions so they are readily available
from aniso8601.time import parse_datetime, parse_time, get_time_resolution
from aniso8601.date import parse_date, get_date_resolution
from aniso8601.duration import parse_duration
from aniso8601.interval import parse_interval, parse_repeating_interval
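
#Illustrative top-level usage sketch (inputs are examples; naive values
#shown with Python 3 reprs):
#
#>>> import aniso8601
#>>> aniso8601.parse_date('1984-04-23')
#datetime.date(1984, 4, 23)
#>>> aniso8601.parse_time('11:31:14')
#datetime.time(11, 31, 14)
#>>> aniso8601.parse_datetime('1977-06-10T12:00:00')
#datetime.datetime(1977, 6, 10, 12, 0)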
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/utcoffset.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

import datetime

class UTCOffset(datetime.tzinfo):
    def __init__(self, name=None, minutes=None):
        #We build an offset in this manner since the
        #tzinfo class must have an init
        #"method that can be called with no arguments"
        self._name = name

        if minutes is not None:
            self._utcdelta = datetime.timedelta(minutes=minutes)
        else:
            self._utcdelta = None

    def __repr__(self):
        if self._utcdelta >= datetime.timedelta(hours=0):
            return '+{0} UTC'.format(self._utcdelta)

        #From the docs:
        #String representations of timedelta objects are normalized
        #similarly to their internal representation. This leads to
        #somewhat unusual results for negative timedeltas.
        #
        #Clean this up for printing purposes
        #Negative deltas start at -1 day
        correcteddays = abs(self._utcdelta.days + 1)

        #Negative deltas have positive seconds
        deltaseconds = (24 * 60 * 60) - self._utcdelta.seconds

        #(24 hours / day) * (60 minutes / hour) * (60 seconds / minute)
        days, remainder = divmod(deltaseconds, 24 * 60 * 60)

        #(1 hour) * (60 minutes / hour) * (60 seconds / minute)
        hours, remainder = divmod(remainder, 1 * 60 * 60)

        #(1 minute) * (60 seconds / minute)
        minutes, seconds = divmod(remainder, 1 * 60)

        #Add any remaining days to the correcteddays count
        correcteddays += days

        if correcteddays == 0:
            return '-{0}:{1:02}:{2:02} UTC'.format(hours, minutes, seconds)
        elif correcteddays == 1:
            return '-1 day, {0}:{1:02}:{2:02} UTC'.format(hours,
                                                          minutes,
                                                          seconds)

        return '-{0} days, {1}:{2:02}:{3:02} UTC'.format(correcteddays,
                                                         hours,
                                                         minutes,
                                                         seconds)

    def utcoffset(self, dt):
        return self._utcdelta

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        #ISO 8601 specifies offsets should be different if DST is required,
        #instead of allowing for a DST to be specified
        #https://docs.python.org/2/library/datetime.html#datetime.tzinfo.dst
        return datetime.timedelta(0)
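
#Illustrative sketch of the offset behavior (Python 3 reprs):
#
#>>> import datetime
#>>> tz = UTCOffset(name='+1:30', minutes=90)
#>>> datetime.datetime(2019, 1, 1, 12, 0, tzinfo=tz).utcoffset()
#datetime.timedelta(seconds=5400)
#>>> repr(UTCOffset(minutes=-150))
#'-2:30:00 UTC'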
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/exceptions.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

class ISOFormatError(ValueError):
    """Raised when an ISO 8601 string fails a format check."""

class NegativeDurationError(ValueError):
    """Raised when a duration is negative."""

class YearOutOfBoundsError(ValueError):
    """Raised when a year exceeds limits."""

class WeekOutOfBoundsError(ValueError):
    """Raised when a week exceeds a year."""

class DayOutOfBoundsError(ValueError):
    """Raised when a day is outside of 1..365 (1..366 for leap years)."""

class HoursOutOfBoundsError(ValueError):
    """Raised when parsed hours are greater than 24."""

class MinutesOutOfBoundsError(ValueError):
    """Raised when parsed minutes are greater than 60."""

class SecondsOutOfBoundsError(ValueError):
    """Raised when parsed seconds are greater than 60."""

class MidnightBoundsError(ValueError):
    """Raised when a parsed time has an hour of 24 but is not midnight."""

class LeapSecondError(NotImplementedError):
    """Raised when attempting to parse a leap second."""
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/resolution.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

from aniso8601 import compat

class DateResolution(object):
    Year, Month, Week, Weekday, Day, Ordinal = list(compat.range(6))

class TimeResolution(object):
    Seconds, Minutes, Hours = list(compat.range(3))
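
#Illustrative note: these resolutions are plain ordinal constants, so they
#compare as ints:
#
#>>> DateResolution.Year, DateResolution.Day
#(0, 4)
#>>> TimeResolution.Seconds < TimeResolution.Hours
#True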
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/timezone.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.exceptions import ISOFormatError

def parse_timezone(tzstr, builder=PythonTimeBuilder):
    #tzstr can be Z, ±hh:mm, ±hhmm, ±hh
    if 'Z' in tzstr:
        if len(tzstr) != 1:
            raise ISOFormatError('"{0}" is not a valid ISO 8601 time offset.'
                                 .format(tzstr))

        return builder.build_timezone(negative=False, Z=True, name=tzstr)
    elif len(tzstr) == 6:
        #±hh:mm
        hourstr = tzstr[1:3]
        minutestr = tzstr[4:6]

        if tzstr[0] == '-' and hourstr == '00' and minutestr == '00':
            raise ISOFormatError('Negative ISO 8601 time offset must not '
                                 'be 0.')
    elif len(tzstr) == 5:
        #±hhmm
        hourstr = tzstr[1:3]
        minutestr = tzstr[3:5]

        if tzstr[0] == '-' and hourstr == '00' and minutestr == '00':
            raise ISOFormatError('Negative ISO 8601 time offset must not '
                                 'be 0.')
    elif len(tzstr) == 3:
        #±hh
        hourstr = tzstr[1:3]
        minutestr = None

        if tzstr[0] == '-' and hourstr == '00':
            raise ISOFormatError('Negative ISO 8601 time offset must not '
                                 'be 0.')
    else:
        raise ISOFormatError('"{0}" is not a valid ISO 8601 time offset.'
                             .format(tzstr))

    if tzstr[0] == '+':
        return builder.build_timezone(negative=False, hh=hourstr,
                                      mm=minutestr, name=tzstr)
    elif tzstr[0] == '-':
        return builder.build_timezone(negative=True, hh=hourstr,
                                      mm=minutestr, name=tzstr)

    raise ISOFormatError('"{0}" is not a valid ISO 8601 time offset.'
                         .format(tzstr))
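
#Illustrative sketch (the default PythonTimeBuilder returns UTCOffset
#instances; Python 3 reprs):
#
#>>> parse_timezone('Z').utcoffset(None)
#datetime.timedelta(0)
#>>> parse_timezone('+11:15').utcoffset(None)
#datetime.timedelta(seconds=40500)
#>>> parse_timezone('-05:00').utcoffset(None)
#datetime.timedelta(days=-1, seconds=68400)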
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/date.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

from aniso8601.exceptions import ISOFormatError
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.resolution import DateResolution

def get_date_resolution(isodatestr):
    #Valid string formats are:
    #
    #Y[YYY]
    #YYYY-MM-DD
    #YYYYMMDD
    #YYYY-MM
    #YYYY-Www
    #YYYYWww
    #YYYY-Www-D
    #YYYYWwwD
    #YYYY-DDD
    #YYYYDDD
    if isodatestr.startswith('+') or isodatestr.startswith('-'):
        raise NotImplementedError('ISO 8601 extended year representation '
                                  'not supported.')

    if isodatestr[0].isdigit() is False or isodatestr[-1].isdigit() is False:
        raise ISOFormatError('"{0}" is not a valid ISO 8601 date.'
                             .format(isodatestr))

    if isodatestr.find('W') != -1:
        #Handle ISO 8601 week date format
        hyphens_present = 1 if isodatestr.find('-') != -1 else 0
        week_date_len = 7 + hyphens_present
        weekday_date_len = 8 + 2 * hyphens_present

        if len(isodatestr) == week_date_len:
            #YYYY-Www
            #YYYYWww
            return DateResolution.Week
        elif len(isodatestr) == weekday_date_len:
            #YYYY-Www-D
            #YYYYWwwD
            return DateResolution.Weekday
        else:
            raise ISOFormatError('"{0}" is not a valid ISO 8601 week date.'
                                 .format(isodatestr))

    #If the size of the string is 4 or less,
    #assume it's a truncated year representation
    if len(isodatestr) <= 4:
        return DateResolution.Year

    #An ISO string may be a calendar representation if:
    # 1) When split on a hyphen, the sizes of the parts are 4, 2, 2 or 4, 2
    # 2) There are no hyphens, and the length is 8
    datestrsplit = isodatestr.split('-')

    #Check case 1
    if len(datestrsplit) == 2:
        if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 2:
            return DateResolution.Month

    if len(datestrsplit) == 3:
        if (len(datestrsplit[0]) == 4
                and len(datestrsplit[1]) == 2
                and len(datestrsplit[2]) == 2):
            return DateResolution.Day

    #Check case 2
    if len(isodatestr) == 8 and isodatestr.find('-') == -1:
        return DateResolution.Day

    #An ISO string may be an ordinal date representation if:
    # 1) When split on a hyphen, the sizes of the parts are 4, 3
    # 2) There are no hyphens, and the length is 7
    #Check case 1
    if len(datestrsplit) == 2:
        if len(datestrsplit[0]) == 4 and len(datestrsplit[1]) == 3:
            return DateResolution.Ordinal

    #Check case 2
    if len(isodatestr) == 7 and isodatestr.find('-') == -1:
        return DateResolution.Ordinal

    #None of the date representations match
    raise ISOFormatError('"{0}" is not an ISO 8601 date, perhaps it '
                         'represents a time or datetime.'.format(isodatestr))
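
#Illustrative sketch:
#
#>>> get_date_resolution('1981-04-05') == DateResolution.Day
#True
#>>> get_date_resolution('1981-04') == DateResolution.Month
#True
#>>> get_date_resolution('2004-W53') == DateResolution.Week
#True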
def parse_date(isodatestr, builder=PythonTimeBuilder):
    #Given a string in any ISO 8601 date format, return a datetime.date
    #object that corresponds to the given date. Valid string formats are:
    #
    #Y[YYY]
    #YYYY-MM-DD
    #YYYYMMDD
    #YYYY-MM
    #YYYY-Www
    #YYYYWww
    #YYYY-Www-D
    #YYYYWwwD
    #YYYY-DDD
    #YYYYDDD
    #
    #Note that the ISO 8601 date format of ±YYYYY is expressly not supported
    return _RESOLUTION_MAP[get_date_resolution(isodatestr)](isodatestr,
                                                            builder)

def _parse_year(yearstr, builder):
    #yearstr is of the format Y[YYY]
    return builder.build_date(YYYY=yearstr)

def _parse_calendar_day(datestr, builder):
    #datestr is of the format YYYY-MM-DD or YYYYMMDD
    if len(datestr) == 10:
        #YYYY-MM-DD
        yearstr = datestr[0:4]
        monthstr = datestr[5:7]
        daystr = datestr[8:]
    elif len(datestr) == 8:
        #YYYYMMDD
        yearstr = datestr[0:4]
        monthstr = datestr[4:6]
        daystr = datestr[6:]
    else:
        raise ISOFormatError('"{0}" is not a valid ISO 8601 calendar day.'
                             .format(datestr))

    return builder.build_date(YYYY=yearstr, MM=monthstr, DD=daystr)

def _parse_calendar_month(datestr, builder):
    #datestr is of the format YYYY-MM
    if len(datestr) != 7:
        raise ISOFormatError('"{0}" is not a valid ISO 8601 calendar month.'
                             .format(datestr))

    yearstr = datestr[0:4]
    monthstr = datestr[5:]

    return builder.build_date(YYYY=yearstr, MM=monthstr)

def _parse_week_day(datestr, builder):
    #datestr is of the format YYYY-Www-D, YYYYWwwD
    #
    #W is the week number prefix, ww is the week number, between 1 and 53
    #0 is not a valid week number, which differs from the Python
    #implementation
    #
    #D is the weekday number, between 1 and 7, which differs from the Python
    #implementation which is between 0 and 6
    yearstr = datestr[0:4]

    #Week number will be the two characters after the W
    windex = datestr.find('W')
    weekstr = datestr[windex + 1:windex + 3]

    if datestr.find('-') != -1 and len(datestr) == 10:
        #YYYY-Www-D
        daystr = datestr[9:10]
    elif len(datestr) == 8:
        #YYYYWwwD
        daystr = datestr[7:8]
    else:
        raise ISOFormatError('"{0}" is not a valid ISO 8601 week date.'
                             .format(datestr))

    return builder.build_date(YYYY=yearstr, Www=weekstr, D=daystr)

def _parse_week(datestr, builder):
    #datestr is of the format YYYY-Www, YYYYWww
    #
    #W is the week number prefix, ww is the week number, between 1 and 53
    #0 is not a valid week number, which differs from the Python
    #implementation
    yearstr = datestr[0:4]

    #Week number will be the two characters after the W
    windex = datestr.find('W')
    weekstr = datestr[windex + 1:windex + 3]

    return builder.build_date(YYYY=yearstr, Www=weekstr)

def _parse_ordinal_date(datestr, builder):
    #datestr is of the format YYYY-DDD or YYYYDDD
    #DDD can be from 1 - 365 (366 in leap years), matching Python's definition
    yearstr = datestr[0:4]

    if datestr.find('-') != -1:
        #YYYY-DDD
        daystr = datestr[(datestr.find('-') + 1):]
    else:
        #YYYYDDD
        daystr = datestr[4:]

    return builder.build_date(YYYY=yearstr, DDD=daystr)

_RESOLUTION_MAP = {
    DateResolution.Day: _parse_calendar_day,
    DateResolution.Ordinal: _parse_ordinal_date,
    DateResolution.Month: _parse_calendar_month,
    DateResolution.Week: _parse_week,
    DateResolution.Weekday: _parse_week_day,
    DateResolution.Year: _parse_year
}
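
#Illustrative dispatch sketch (each supported format routes through
#_RESOLUTION_MAP):
#
#>>> parse_date('1981-095')
#datetime.date(1981, 4, 5)
#>>> parse_date('2004-W53-6')
#datetime.date(2005, 1, 1)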
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/builders/__init__.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

from aniso8601.exceptions import ISOFormatError

class BaseTimeBuilder(object):
    @classmethod
    def build_date(cls, YYYY=None, MM=None, DD=None, Www=None, D=None,
                   DDD=None):
        raise NotImplementedError

    @classmethod
    def build_time(cls, hh=None, mm=None, ss=None, tz=None):
        raise NotImplementedError

    @classmethod
    def build_datetime(cls, date, time):
        raise NotImplementedError

    @classmethod
    def build_duration(cls, PnY=None, PnM=None, PnW=None, PnD=None, TnH=None,
                       TnM=None, TnS=None):
        raise NotImplementedError

    @classmethod
    def build_interval(cls, start=None, end=None, duration=None):
        #start, end, and duration are all tuples
        raise NotImplementedError

    @classmethod
    def build_repeating_interval(cls, R=None, Rnn=None, interval=None):
        #interval is a tuple
        raise NotImplementedError

    @classmethod
    def build_timezone(cls, negative=None, Z=None, hh=None, mm=None, name=''):
        raise NotImplementedError

    @staticmethod
    def cast(value, castfunction, caughtexceptions=(ValueError,),
             thrownexception=ISOFormatError, thrownmessage=None):
        try:
            result = castfunction(value)
        except caughtexceptions:
            raise thrownexception(thrownmessage)

        return result

    @classmethod
    def _build_object(cls, parsetuple):
        #Given a TupleBuilder tuple, build the correct object
        if parsetuple[-1] == 'date':
            return cls.build_date(YYYY=parsetuple[0], MM=parsetuple[1],
                                  DD=parsetuple[2], Www=parsetuple[3],
                                  D=parsetuple[4], DDD=parsetuple[5])
        elif parsetuple[-1] == 'time':
            return cls.build_time(hh=parsetuple[0], mm=parsetuple[1],
                                  ss=parsetuple[2], tz=parsetuple[3])
        elif parsetuple[-1] == 'datetime':
            return cls.build_datetime(parsetuple[0], parsetuple[1])
        elif parsetuple[-1] == 'duration':
            return cls.build_duration(PnY=parsetuple[0], PnM=parsetuple[1],
                                      PnW=parsetuple[2], PnD=parsetuple[3],
                                      TnH=parsetuple[4], TnM=parsetuple[5],
                                      TnS=parsetuple[6])
        elif parsetuple[-1] == 'interval':
            return cls.build_interval(start=parsetuple[0], end=parsetuple[1],
                                      duration=parsetuple[2])
        elif parsetuple[-1] == 'repeatinginterval':
            return cls.build_repeating_interval(R=parsetuple[0],
                                                Rnn=parsetuple[1],
                                                interval=parsetuple[2])

        return cls.build_timezone(negative=parsetuple[0], Z=parsetuple[1],
                                  hh=parsetuple[2], mm=parsetuple[3],
                                  name=parsetuple[4])

class TupleBuilder(BaseTimeBuilder):
    #Builder used to return the arguments as a tuple, cleans up some
    #parse methods
    @classmethod
    def build_date(cls, YYYY=None, MM=None, DD=None, Www=None, D=None,
                   DDD=None):
        return (YYYY, MM, DD, Www, D, DDD, 'date')

    @classmethod
    def build_time(cls, hh=None, mm=None, ss=None, tz=None):
        return (hh, mm, ss, tz, 'time')

    @classmethod
    def build_datetime(cls, date, time):
        return (date, time, 'datetime')

    @classmethod
    def build_duration(cls, PnY=None, PnM=None, PnW=None, PnD=None, TnH=None,
                       TnM=None, TnS=None):
        return (PnY, PnM, PnW, PnD, TnH, TnM, TnS, 'duration')

    @classmethod
    def build_interval(cls, start=None, end=None, duration=None):
        return (start, end, duration, 'interval')

    @classmethod
    def build_repeating_interval(cls, R=None, Rnn=None, interval=None):
        return (R, Rnn, interval, 'repeatinginterval')

    @classmethod
    def build_timezone(cls, negative=None, Z=None, hh=None, mm=None, name=''):
        return (negative, Z, hh, mm, name, 'timezone')
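
#Illustrative sketch of the intermediate tuple form consumed by
#_build_object:
#
#>>> TupleBuilder.build_date(YYYY='1981', MM='04', DD='05')
#('1981', '04', '05', None, None, None, 'date')
#>>> TupleBuilder.build_timezone(negative=False, Z=True, name='Z')
#(False, True, None, None, 'Z', 'timezone')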
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/builders/python.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

import datetime

from aniso8601.builders import BaseTimeBuilder, TupleBuilder
from aniso8601.exceptions import (DayOutOfBoundsError,
                                  HoursOutOfBoundsError,
                                  LeapSecondError, MidnightBoundsError,
                                  MinutesOutOfBoundsError,
                                  SecondsOutOfBoundsError,
                                  WeekOutOfBoundsError, YearOutOfBoundsError)
from aniso8601.utcoffset import UTCOffset

MICROSECONDS_PER_SECOND = int(1e6)
MICROSECONDS_PER_MINUTE = 60 * MICROSECONDS_PER_SECOND
MICROSECONDS_PER_HOUR = 60 * MICROSECONDS_PER_MINUTE
MICROSECONDS_PER_DAY = 24 * MICROSECONDS_PER_HOUR
MICROSECONDS_PER_WEEK = 7 * MICROSECONDS_PER_DAY
MICROSECONDS_PER_MONTH = 30 * MICROSECONDS_PER_DAY
MICROSECONDS_PER_YEAR = 365 * MICROSECONDS_PER_DAY

class PythonTimeBuilder(BaseTimeBuilder):
    @classmethod
    def build_date(cls, YYYY=None, MM=None, DD=None, Www=None, D=None,
                   DDD=None):
        if YYYY is not None:
            #Truncated dates, like '19', refer to 1900-1999 inclusive;
            #we simply parse to 1900
            if len(YYYY) < 4:
                #Pad with 0s on the right to form a complete year
                YYYY = YYYY.ljust(4, '0')

            year = cls.cast(YYYY, int,
                            thrownmessage='Invalid year string.')

        if MM is not None:
            month = cls.cast(MM, int,
                             thrownmessage='Invalid month string.')
        else:
            month = 1

        if DD is not None:
            day = cls.cast(DD, int,
                           thrownmessage='Invalid day string.')
        else:
            day = 1

        if Www is not None:
            weeknumber = cls.cast(Www, int,
                                  thrownmessage='Invalid week string.')

            if weeknumber == 0 or weeknumber > 53:
                raise WeekOutOfBoundsError('Week number must be between '
                                           '1..53.')
        else:
            weeknumber = None

        if DDD is not None:
            dayofyear = cls.cast(DDD, int,
                                 thrownmessage='Invalid day string.')
        else:
            dayofyear = None

        if D is not None:
            dayofweek = cls.cast(D, int,
                                 thrownmessage='Invalid day string.')

            if dayofweek == 0 or dayofweek > 7:
                raise DayOutOfBoundsError('Weekday number must be between '
                                          '1..7.')
        else:
            dayofweek = None

        #0000 (1 BC) is not representable as a Python date so a ValueError is
        #raised
        if year == 0:
            raise YearOutOfBoundsError('Year must be between 1..9999.')

        if dayofyear is not None:
            return PythonTimeBuilder._build_ordinal_date(year, dayofyear)

        if weeknumber is not None:
            return PythonTimeBuilder._build_week_date(year, weeknumber,
                                                      isoday=dayofweek)

        return datetime.date(year, month, day)
    @classmethod
    def build_time(cls, hh=None, mm=None, ss=None, tz=None):
        #Builds a time from the given parts, handling fractional arguments
        #where necessary
        hours = 0
        minutes = 0
        seconds = 0
        microseconds = 0

        if hh is not None:
            if '.' in hh:
                hours, remainingmicroseconds = cls._split_to_microseconds(
                    hh, MICROSECONDS_PER_HOUR, 'Invalid hour string.')
                microseconds += remainingmicroseconds
            else:
                hours = cls.cast(hh, int,
                                 thrownmessage='Invalid hour string.')

        if mm is not None:
            if '.' in mm:
                minutes, remainingmicroseconds = cls._split_to_microseconds(
                    mm, MICROSECONDS_PER_MINUTE, 'Invalid minute string.')
                microseconds += remainingmicroseconds
            else:
                minutes = cls.cast(mm, int,
                                   thrownmessage='Invalid minute string.')

        if ss is not None:
            if '.' in ss:
                seconds, remainingmicroseconds = cls._split_to_microseconds(
                    ss, MICROSECONDS_PER_SECOND, 'Invalid second string.')
                microseconds += remainingmicroseconds
            else:
                seconds = cls.cast(ss, int,
                                   thrownmessage='Invalid second string.')

        (hours, minutes, seconds,
         microseconds) = PythonTimeBuilder._distribute_microseconds(
            microseconds, (hours, minutes, seconds),
            (MICROSECONDS_PER_HOUR, MICROSECONDS_PER_MINUTE,
             MICROSECONDS_PER_SECOND))

        #Range checks
        if hours == 23 and minutes == 59 and seconds == 60:
            #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
            raise LeapSecondError('Leap seconds are not supported.')

        if (hours == 24
                and (minutes != 0 or seconds != 0)):
            raise MidnightBoundsError('Hour 24 may only represent midnight.')

        if hours > 24:
            raise HoursOutOfBoundsError('Hour must be between 0..24 with '
                                        '24 representing midnight.')

        if minutes >= 60:
            raise MinutesOutOfBoundsError('Minutes must be less than 60.')

        if seconds >= 60:
            raise SecondsOutOfBoundsError('Seconds must be less than 60.')

        #Fix ranges that have passed range checks
        if hours == 24:
            hours = 0
            minutes = 0
            seconds = 0

        #Datetimes don't handle fractional components, so we use a timedelta
        if tz is not None:
            return (datetime.datetime(1, 1, 1,
                                      hour=hours,
                                      minute=minutes,
                                      tzinfo=cls._build_object(tz))
                    + datetime.timedelta(seconds=seconds,
                                         microseconds=microseconds)
                    ).timetz()

        return (datetime.datetime(1, 1, 1,
                                  hour=hours,
                                  minute=minutes)
                + datetime.timedelta(seconds=seconds,
                                     microseconds=microseconds)
                ).time()
    @classmethod
    def build_datetime(cls, date, time):
        return datetime.datetime.combine(cls._build_object(date),
                                         cls._build_object(time))

    @classmethod
    def build_duration(cls, PnY=None, PnM=None, PnW=None, PnD=None, TnH=None,
                       TnM=None, TnS=None):
        years = 0
        months = 0
        days = 0
        weeks = 0
        hours = 0
        minutes = 0
        seconds = 0
        microseconds = 0

        if PnY is not None:
            if '.' in PnY:
                years, remainingmicroseconds = cls._split_to_microseconds(
                    PnY, MICROSECONDS_PER_YEAR, 'Invalid year string.')
                microseconds += remainingmicroseconds
            else:
                years = cls.cast(PnY, int,
                                 thrownmessage='Invalid year string.')

        if PnM is not None:
            if '.' in PnM:
                months, remainingmicroseconds = cls._split_to_microseconds(
                    PnM, MICROSECONDS_PER_MONTH, 'Invalid month string.')
                microseconds += remainingmicroseconds
            else:
                months = cls.cast(PnM, int,
                                  thrownmessage='Invalid month string.')

        if PnW is not None:
            if '.' in PnW:
                weeks, remainingmicroseconds = cls._split_to_microseconds(
                    PnW, MICROSECONDS_PER_WEEK, 'Invalid week string.')
                microseconds += remainingmicroseconds
            else:
                weeks = cls.cast(PnW, int,
                                 thrownmessage='Invalid week string.')

        if PnD is not None:
            if '.' in PnD:
                days, remainingmicroseconds = cls._split_to_microseconds(
                    PnD, MICROSECONDS_PER_DAY, 'Invalid day string.')
                microseconds += remainingmicroseconds
            else:
                days = cls.cast(PnD, int,
                                thrownmessage='Invalid day string.')

        if TnH is not None:
            if '.' in TnH:
                hours, remainingmicroseconds = cls._split_to_microseconds(
                    TnH, MICROSECONDS_PER_HOUR, 'Invalid hour string.')
                microseconds += remainingmicroseconds
            else:
                hours = cls.cast(TnH, int,
                                 thrownmessage='Invalid hour string.')

        if TnM is not None:
            if '.' in TnM:
                minutes, remainingmicroseconds = cls._split_to_microseconds(
                    TnM, MICROSECONDS_PER_MINUTE, 'Invalid minute string.')
                microseconds += remainingmicroseconds
            else:
                minutes = cls.cast(TnM, int,
                                   thrownmessage='Invalid minute string.')

        if TnS is not None:
            if '.' in TnS:
                seconds, remainingmicroseconds = cls._split_to_microseconds(
                    TnS, MICROSECONDS_PER_SECOND, 'Invalid second string.')
                microseconds += remainingmicroseconds
            else:
                seconds = cls.cast(TnS, int,
                                   thrownmessage='Invalid second string.')

        (years, months, weeks, days, hours, minutes, seconds,
         microseconds) = PythonTimeBuilder._distribute_microseconds(
            microseconds,
            (years, months, weeks, days, hours, minutes, seconds),
            (MICROSECONDS_PER_YEAR, MICROSECONDS_PER_MONTH,
             MICROSECONDS_PER_WEEK, MICROSECONDS_PER_DAY,
             MICROSECONDS_PER_HOUR, MICROSECONDS_PER_MINUTE,
             MICROSECONDS_PER_SECOND))

        #Note that weeks can be handled without conversion to days
        totaldays = years * 365 + months * 30 + days

        return datetime.timedelta(days=totaldays,
                                  seconds=seconds,
                                  microseconds=microseconds,
                                  minutes=minutes,
                                  hours=hours,
                                  weeks=weeks)
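
    #Illustrative sketch of the calendar approximations above (1 year =
    #365 days, 1 month = 30 days; excess precision is truncated, not
    #rounded; Python 3 reprs):
    #
    #>>> PythonTimeBuilder.build_duration(PnY='1', PnM='2', PnD='3')
    #datetime.timedelta(days=428)
    #>>> PythonTimeBuilder.build_duration(TnS='2.0000048')
    #datetime.timedelta(seconds=2, microseconds=4)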
    @classmethod
    def build_interval(cls, start=None, end=None, duration=None):
        if start is not None and end is not None:
            #<start>/<end>
            startobject = cls._build_object(start)
            endobject = cls._build_object(end)

            return (startobject, endobject)

        durationobject = cls._build_object(duration)

        #Determine if datetime promotion is required
        datetimerequired = (duration[4] is not None
                            or duration[5] is not None
                            or duration[6] is not None
                            or durationobject.seconds != 0
                            or durationobject.microseconds != 0)

        if end is not None:
            #<duration>/<end>
            endobject = cls._build_object(end)

            if end[-1] == 'date' and datetimerequired is True:
                #<end> is a date, and <duration> requires datetime resolution
                return (endobject,
                        cls.build_datetime(end, TupleBuilder.build_time())
                        - durationobject)

            return (endobject,
                    endobject
                    - durationobject)

        #<start>/<duration>
        startobject = cls._build_object(start)

        if start[-1] == 'date' and datetimerequired is True:
            #<start> is a date, and <duration> requires datetime resolution
            return (startobject,
                    cls.build_datetime(start, TupleBuilder.build_time())
                    + durationobject)

        return (startobject,
                startobject
                + durationobject)

    @classmethod
    def build_repeating_interval(cls, R=None, Rnn=None, interval=None):
        startobject = None
        endobject = None

        if interval[0] is not None:
            startobject = cls._build_object(interval[0])

        if interval[1] is not None:
            endobject = cls._build_object(interval[1])

        if interval[2] is not None:
            durationobject = cls._build_object(interval[2])
        else:
            durationobject = endobject - startobject

        if R is True:
            if startobject is not None:
                return cls._date_generator_unbounded(startobject,
                                                     durationobject)

            return cls._date_generator_unbounded(endobject,
                                                 -durationobject)

        iterations = cls.cast(Rnn, int,
                              thrownmessage='Invalid iterations.')

        if startobject is not None:
            return cls._date_generator(startobject, durationobject,
                                       iterations)

        return cls._date_generator(endobject, -durationobject, iterations)

    @classmethod
    def build_timezone(cls, negative=None, Z=None, hh=None, mm=None, name=''):
        if Z is True:
            #Z -> UTC
            return UTCOffset(name='UTC', minutes=0)

        if hh is not None:
            tzhour = cls.cast(hh, int,
                              thrownmessage='Invalid hour string.')
        else:
            tzhour = 0

        if mm is not None:
            tzminute = cls.cast(mm, int,
                                thrownmessage='Invalid minute string.')
        else:
            tzminute = 0

        if negative is True:
            return UTCOffset(name=name, minutes=-(tzhour * 60 + tzminute))

        return UTCOffset(name=name, minutes=tzhour * 60 + tzminute)
    @staticmethod
    def _build_week_date(isoyear, isoweek, isoday=None):
        if isoday is None:
            return (PythonTimeBuilder._iso_year_start(isoyear)
                    + datetime.timedelta(weeks=isoweek - 1))

        return (PythonTimeBuilder._iso_year_start(isoyear)
                + datetime.timedelta(weeks=isoweek - 1, days=isoday - 1))

    @staticmethod
    def _build_ordinal_date(isoyear, isoday):
        #Day of year to a date
        #https://stackoverflow.com/questions/2427555/python-question-year-and-day-of-year-to-date
        builtdate = (datetime.date(isoyear, 1, 1)
                     + datetime.timedelta(days=isoday - 1))

        #Enforce ordinal day limitation
        #https://bitbucket.org/nielsenb/aniso8601/issues/14/parsing-ordinal-dates-should-only-allow
        if isoday == 0 or builtdate.year != isoyear:
            raise DayOutOfBoundsError('Day of year must be from 1..365, '
                                      '1..366 for leap year.')

        return builtdate

    @staticmethod
    def _iso_year_start(isoyear):
        #Given an ISO year, returns the equivalent of the start of the year
        #on the Gregorian calendar (which is used by Python)
        #Stolen from:
        #http://stackoverflow.com/questions/304256/whats-the-best-way-to-find-the-inverse-of-datetime-isocalendar

        #Determine the location of the 4th of January; the first week of
        #the ISO year is the week containing the 4th of January
        #http://en.wikipedia.org/wiki/ISO_week_date
        fourth_jan = datetime.date(isoyear, 1, 4)

        #Note the conversion between ISO day (1 - 7) and Python day (0 - 6)
        delta = datetime.timedelta(days=fourth_jan.isoweekday() - 1)

        #Return the start of the year
        return fourth_jan - delta

    @staticmethod
    def _date_generator(startdate, timedelta, iterations):
        currentdate = startdate
        currentiteration = 0

        while currentiteration < iterations:
            yield currentdate

            #Update the values
            currentdate += timedelta
            currentiteration += 1

    @staticmethod
    def _date_generator_unbounded(startdate, timedelta):
        currentdate = startdate

        while True:
            yield currentdate

            #Update the value
            currentdate += timedelta

    @classmethod
    def _split_to_microseconds(cls, floatstr, conversion, thrownmessage):
        #Splits a string with a decimal point into an int, and an
        #int representing the floating point remainder as a number
        #of microseconds, determined by multiplying by conversion
        intpart, floatpart = floatstr.split('.')

        intvalue = cls.cast(intpart, int,
                            thrownmessage=thrownmessage)

        preconvertedvalue = cls.cast(floatpart, int,
                                     thrownmessage=thrownmessage)

        convertedvalue = ((preconvertedvalue * conversion) //
                          (10 ** len(floatpart)))

        return (intvalue, convertedvalue)

    @staticmethod
    def _distribute_microseconds(todistribute, recipients, reductions):
        #Given a number of microseconds as int, a tuple of ints length n
        #to distribute to, and a tuple of ints length n to divide todistribute
        #by (from largest to smallest), returns a tuple of length n + 1, with
        #todistribute divided across recipients using the reductions, with
        #the final remainder returned as the final tuple member
        results = []

        remainder = todistribute

        for index, reduction in enumerate(reductions):
            additional, remainder = divmod(remainder, reduction)

            results.append(recipients[index] + additional)

        #Always return the remaining microseconds
        results.append(remainder)

        return tuple(results)
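
#Illustrative sketch of the fractional helpers ('12.5' hours splits into 12
#hours plus the remainder in microseconds, which is then redistributed):
#
#>>> PythonTimeBuilder._split_to_microseconds('12.5', MICROSECONDS_PER_HOUR,
#...                                          'Invalid hour string.')
#(12, 1800000000)
#>>> PythonTimeBuilder._distribute_microseconds(
#...     1800000000, (12, 0, 0),
#...     (MICROSECONDS_PER_HOUR, MICROSECONDS_PER_MINUTE,
#...      MICROSECONDS_PER_SECOND))
#(12, 30, 0, 0)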
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/builders/tests/test_python.py
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

import datetime
import unittest

from aniso8601 import compat
from aniso8601.exceptions import (DayOutOfBoundsError, HoursOutOfBoundsError,
                                  ISOFormatError, LeapSecondError,
                                  MidnightBoundsError, MinutesOutOfBoundsError,
                                  SecondsOutOfBoundsError,
                                  WeekOutOfBoundsError, YearOutOfBoundsError)
from aniso8601.builders.python import PythonTimeBuilder
from aniso8601.utcoffset import UTCOffset

class TestPythonTimeBuilder(unittest.TestCase):
    def test_build_date(self):
        testtuples = (({'YYYY': '2013', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(2013, 1, 1)),
                      ({'YYYY': '0001', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(1, 1, 1)),
                      ({'YYYY': '1900', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(1900, 1, 1)),
                      ({'YYYY': '1981', 'MM': '04', 'DD': '05', 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(1981, 4, 5)),
                      ({'YYYY': '1981', 'MM': '04', 'DD': None, 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(1981, 4, 1)),
                      ({'YYYY': '1981', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': '095'},
                       datetime.date(1981, 4, 5)),
                      ({'YYYY': '1981', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': '365'},
                       datetime.date(1981, 12, 31)),
                      ({'YYYY': '1980', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': '366'},
                       datetime.date(1980, 12, 31)),
                      #Make sure we shift in zeros
                      ({'YYYY': '1', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(1000, 1, 1)),
                      ({'YYYY': '12', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(1200, 1, 1)),
                      ({'YYYY': '123', 'MM': None, 'DD': None, 'Www': None,
                        'D': None, 'DDD': None},
                       datetime.date(1230, 1, 1)))

        for testtuple in testtuples:
            result = PythonTimeBuilder.build_date(**testtuple[0])
            self.assertEqual(result, testtuple[1])

        #Test weekday
        testtuples = (({'YYYY': '2004', 'MM': None, 'DD': None, 'Www': '53',
                        'D': None, 'DDD': None},
                       datetime.date(2004, 12, 27), 0),
                      ({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '01',
                        'D': None, 'DDD': None},
                       datetime.date(2008, 12, 29), 0),
                      ({'YYYY': '2010', 'MM': None, 'DD': None, 'Www': '01',
                        'D': None, 'DDD': None},
                       datetime.date(2010, 1, 4), 0),
                      ({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '53',
                        'D': None, 'DDD': None},
                       datetime.date(2009, 12, 28), 0),
                      ({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '01',
                        'D': '1', 'DDD': None},
                       datetime.date(2008, 12, 29), 0),
                      ({'YYYY': '2009', 'MM': None, 'DD': None, 'Www': '53',
                        'D': '7', 'DDD': None},
                       datetime.date(2010, 1, 3), 6),
                      ({'YYYY': '2010', 'MM': None, 'DD': None, 'Www': '01',
                        'D': '1', 'DDD': None},
                       datetime.date(2010, 1, 4), 0),
                      ({'YYYY': '2004', 'MM': None, 'DD': None, 'Www': '53',
                        'D': '6', 'DDD': None},
                       datetime.date(2005, 1, 1), 5))

        for testtuple in testtuples:
            result = PythonTimeBuilder.build_date(**testtuple[0])
            self.assertEqual(result, testtuple[1])
            self.assertEqual(result.weekday(), testtuple[2])

    def test_build_date_bounds_checking(self):
        #0 isn't a valid week number
        with self.assertRaises(WeekOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='2003', Www='00')

        #Week must not be larger than 53
        with self.assertRaises(WeekOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='2004', Www='54')

        #0 isn't a valid day number
        with self.assertRaises(DayOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='2001', Www='02', D='0')

        #Day must not be larger than 7
        with self.assertRaises(DayOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='2001', Www='02', D='8')

        #0 isn't a valid year for a Python builder
        with self.assertRaises(YearOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='0000')

        with self.assertRaises(DayOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='1981', DDD='000')

        #Day 366 is only valid on a leap year
        with self.assertRaises(DayOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='1981', DDD='366')

        #Day must be 365 or 366, not larger
        with self.assertRaises(DayOutOfBoundsError):
            PythonTimeBuilder.build_date(YYYY='1981', DDD='367')
    def test_build_time(self):
        testtuples = (({}, datetime.time()),
                      ({'hh': '12.5'},
                       datetime.time(hour=12, minute=30)),
                      ({'hh': '23.99999999997'},
                       datetime.time(hour=23, minute=59, second=59,
                                     microsecond=999999)),
                      ({'hh': '1', 'mm': '23'},
                       datetime.time(hour=1, minute=23)),
                      ({'hh': '1', 'mm': '23.4567'},
                       datetime.time(hour=1, minute=23, second=27,
                                     microsecond=402000)),
                      ({'hh': '14', 'mm': '43.999999997'},
                       datetime.time(hour=14, minute=43, second=59,
                                     microsecond=999999)),
                      ({'hh': '1', 'mm': '23', 'ss': '45'},
                       datetime.time(hour=1, minute=23, second=45)),
                      ({'hh': '23', 'mm': '21', 'ss': '28.512400'},
                       datetime.time(hour=23, minute=21, second=28,
                                     microsecond=512400)),
                      ({'hh': '01', 'mm': '03', 'ss': '11.858714'},
                       datetime.time(hour=1, minute=3, second=11,
                                     microsecond=858714)),
                      ({'hh': '14', 'mm': '43', 'ss': '59.9999997'},
                       datetime.time(hour=14, minute=43, second=59,
                                     microsecond=999999)),
                      ({'hh': '24'}, datetime.time(hour=0)),
                      ({'hh': '24', 'mm': '00'}, datetime.time(hour=0)),
                      ({'hh': '24', 'mm': '00', 'ss': '00'},
                       datetime.time(hour=0)),
                      ({'tz': (False, None, '00', '00', 'UTC', 'timezone')},
                       datetime.time(tzinfo=UTCOffset(name='UTC',
                                                      minutes=0))),
                      ({'hh': '23', 'mm': '21', 'ss': '28.512400',
                        'tz': (False, None, '00', '00', '+00:00',
                               'timezone')},
                       datetime.time(hour=23, minute=21, second=28,
                                     microsecond=512400,
                                     tzinfo=UTCOffset(name='+00:00',
                                                      minutes=0))),
                      ({'hh': '1', 'mm': '23',
                        'tz': (False, None, '01', '00', '+1', 'timezone')},
                       datetime.time(hour=1, minute=23,
                                     tzinfo=UTCOffset(name='+1',
                                                      minutes=60))),
                      ({'hh': '1', 'mm': '23.4567',
                        'tz': (True, None, '01', '00', '-1', 'timezone')},
                       datetime.time(hour=1, minute=23, second=27,
                                     microsecond=402000,
                                     tzinfo=UTCOffset(name='-1',
                                                      minutes=-60))),
                      ({'hh': '23', 'mm': '21', 'ss': '28.512400',
                        'tz': (False, None, '01', '30', '+1:30',
                               'timezone')},
                       datetime.time(hour=23, minute=21, second=28,
                                     microsecond=512400,
                                     tzinfo=UTCOffset(name='+1:30',
                                                      minutes=90))),
                      ({'hh': '23', 'mm': '21', 'ss': '28.512400',
                        'tz': (False, None, '11', '15', '+11:15',
                               'timezone')},
                       datetime.time(hour=23, minute=21, second=28,
                                     microsecond=512400,
                                     tzinfo=UTCOffset(name='+11:15',
                                                      minutes=675))),
                      ({'hh': '23', 'mm': '21', 'ss': '28.512400',
                        'tz': (False, None, '12', '34', '+12:34',
                               'timezone')},
                       datetime.time(hour=23, minute=21, second=28,
                                     microsecond=512400,
                                     tzinfo=UTCOffset(name='+12:34',
                                                      minutes=754))),
                      ({'hh': '23', 'mm': '21', 'ss': '28.512400',
                        'tz': (False, None, '00', '00', 'UTC', 'timezone')},
                       datetime.time(hour=23, minute=21, second=28,
                                     microsecond=512400,
                                     tzinfo=UTCOffset(name='UTC',
                                                      minutes=0))),
                      #Make sure we truncate, not round
                      #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
                      #https://bitbucket.org/nielsenb/aniso8601/issues/21/sub-microsecond-precision-is-lost-when
                      ({'hh': '14.9999999999999999'},
                       datetime.time(hour=14, minute=59, second=59,
                                     microsecond=999999)),
                      ({'mm': '0.00000000999'},
                       datetime.time()),
                      ({'mm': '0.0000000999'},
                       datetime.time(microsecond=5)),
                      ({'ss': '0.0000001'},
                       datetime.time()),
                      ({'ss': '2.0000048'},
                       datetime.time(second=2,
                                     microsecond=4)))

        for testtuple in testtuples:
            result = PythonTimeBuilder.build_time(**testtuple[0])
            self.assertEqual(result, testtuple[1])

    def test_build_time_bounds_checking(self):
        #Leap seconds not supported
        #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
        #https://bitbucket.org/nielsenb/aniso8601/issues/13/parsing-of-leap-second-gives-wildly
        with self.assertRaises(LeapSecondError):
            PythonTimeBuilder.build_time(hh='23', mm='59', ss='60')

        with self.assertRaises(LeapSecondError):
            PythonTimeBuilder.build_time(hh='23', mm='59', ss='60',
                                         tz=UTCOffset(name='UTC', minutes=0))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='00', ss='60')

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='00', ss='60',
                                         tz=UTCOffset(name='UTC', minutes=0))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='00', ss='61')

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='00', ss='61',
                                         tz=UTCOffset(name='UTC', minutes=0))

        with self.assertRaises(MinutesOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='61')

        with self.assertRaises(MinutesOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='61',
                                         tz=UTCOffset(name='UTC', minutes=0))

        with self.assertRaises(MinutesOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='60')

        with self.assertRaises(MinutesOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='00', mm='60.1')

        with self.assertRaises(HoursOutOfBoundsError):
            PythonTimeBuilder.build_time(hh='25')

        #Hour 24 can only represent midnight
        with self.assertRaises(MidnightBoundsError):
            PythonTimeBuilder.build_time(hh='24', mm='00', ss='01')

        with self.assertRaises(MidnightBoundsError):
            PythonTimeBuilder.build_time(hh='24', mm='00.1')

        with self.assertRaises(MidnightBoundsError):
            PythonTimeBuilder.build_time(hh='24', mm='01')

        with self.assertRaises(MidnightBoundsError):
            PythonTimeBuilder.build_time(hh='24.1')
    def test_build_datetime(self):
        testtuples = (((('2019', '06', '05', None, None, None, 'date'),
                        ('01', '03', '11.858714', None, 'time')),
                       datetime.datetime(2019, 6, 5, hour=1, minute=3,
                                         second=11, microsecond=858714)),
                      ((('1234', '02', '03', None, None, None, 'date'),
                        ('23', '21', '28.512400', None, 'time')),
                       datetime.datetime(1234, 2, 3, hour=23, minute=21,
                                         second=28, microsecond=512400)),
                      ((('1981', '04', '05', None, None, None, 'date'),
                        ('23', '21', '28.512400',
                         (False, None, '11', '15', '+11:15', 'timezone'),
                         'time')),
                       datetime.datetime(1981, 4, 5, hour=23, minute=21,
                                         second=28, microsecond=512400,
                                         tzinfo=UTCOffset(name='+11:15',
                                                          minutes=675))))

        for testtuple in testtuples:
            result = PythonTimeBuilder.build_datetime(*testtuple[0])
            self.assertEqual(result, testtuple[1])

    def test_build_datetime_bounds_checking(self):
        #Leap seconds not supported
        #https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
        #https://bitbucket.org/nielsenb/aniso8601/issues/13/parsing-of-leap-second-gives-wildly
        with self.assertRaises(LeapSecondError):
            PythonTimeBuilder.build_datetime(('2016', '12', '31',
                                              None, None, None, 'date'),
                                             ('23', '59', '60', None, 'time'))

        with self.assertRaises(LeapSecondError):
            PythonTimeBuilder.build_datetime(('2016', '12', '31',
                                              None, None, None, 'date'),
                                             ('23', '59', '60',
                                              (False, None, '00', '00',
                                               '+00:00', 'timezone'), 'time'))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '00', '60', None, 'time'))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '00', '60',
                                              (False, None, '00', '00',
                                               '+00:00', 'timezone'), 'time'))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '00', '61', None, 'time'))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '00', '61',
                                              (False, None, '00', '00',
                                               '+00:00', 'timezone'), 'time'))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '59', '61', None, 'time'))

        with self.assertRaises(SecondsOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '59', '61',
                                              (False, None, '00', '00',
                                               '+00:00', 'timezone'), 'time'))

        with self.assertRaises(MinutesOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '61', None, None, 'time'))

        with self.assertRaises(MinutesOutOfBoundsError):
            PythonTimeBuilder.build_datetime(('1981', '04', '05',
                                              None, None, None, 'date'),
                                             ('00', '61', None,
                                              (False, None, '00', '00',
                                               '+00:00', 'timezone'), 'time'))
def test_build_duration(self):
testtuples = (({'PnY': '1', 'PnM': '2', 'PnD': '3',
'TnH': '4', 'TnM': '54', 'TnS': '6'},
datetime.timedelta(days=428, hours=4,
minutes=54, seconds=6)),
({'PnY': '1', 'PnM': '2', 'PnD': '3',
'TnH': '4', 'TnM': '54', 'TnS': '6.5'},
datetime.timedelta(days=428, hours=4,
minutes=54, seconds=6.5)),
({'PnY': '1', 'PnM': '2', 'PnD': '3'},
datetime.timedelta(days=428)),
({'PnY': '1', 'PnM': '2', 'PnD': '3.5'},
datetime.timedelta(days=428.5)),
({'TnH': '4', 'TnM': '54', 'TnS': '6.5'},
datetime.timedelta(hours=4, minutes=54, seconds=6.5)),
({'TnH': '1', 'TnM': '3', 'TnS': '11.858714'},
datetime.timedelta(hours=1, minutes=3,
seconds=11, microseconds=858714)),
({'TnH': '4', 'TnM': '54', 'TnS': '28.512400'},
datetime.timedelta(hours=4, minutes=54,
seconds=28, microseconds=512400)),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
#https://bitbucket.org/nielsenb/aniso8601/issues/21/sub-microsecond-precision-is-lost-when
({'PnY': '1999.9999999999999999'},
datetime.timedelta(days=729999, seconds=86399,
microseconds=999999)),
({'PnM': '1.9999999999999999'},
datetime.timedelta(days=59, hours=23,
minutes=59, seconds=59,
microseconds=999999)),
({'PnW': '1.9999999999999999'},
datetime.timedelta(days=13, hours=23,
minutes=59, seconds=59,
microseconds=999999)),
({'PnD': '1.9999999999999999'},
datetime.timedelta(days=1, hours=23,
minutes=59, seconds=59,
microseconds=999999)),
({'TnH': '14.9999999999999999'},
datetime.timedelta(hours=14, minutes=59,
seconds=59, microseconds=999999)),
({'TnM': '0.00000000999'}, datetime.timedelta(0)),
({'TnM': '0.0000000999'},
datetime.timedelta(microseconds=5)),
({'TnS': '0.0000001'}, datetime.timedelta(0)),
({'TnS': '2.0000048'},
datetime.timedelta(seconds=2, microseconds=4)),
({'PnY': '1'}, datetime.timedelta(days=365)),
({'PnY': '1.5'}, datetime.timedelta(days=547.5)),
({'PnM': '1'}, datetime.timedelta(days=30)),
({'PnM': '1.5'}, datetime.timedelta(days=45)),
({'PnW': '1'}, datetime.timedelta(days=7)),
({'PnW': '1.5'}, datetime.timedelta(days=10.5)),
({'PnD': '1'}, datetime.timedelta(days=1)),
({'PnD': '1.5'}, datetime.timedelta(days=1.5)),
({'PnY': '0003', 'PnM': '06', 'PnD': '04',
'TnH': '12', 'TnM': '30', 'TnS': '05'},
datetime.timedelta(days=1279, hours=12,
minutes=30, seconds=5)),
({'PnY': '0003', 'PnM': '06', 'PnD': '04',
'TnH': '12', 'TnM': '30', 'TnS': '05.5'},
datetime.timedelta(days=1279, hours=12,
minutes=30, seconds=5.5)),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
({'PnY': '0001', 'PnM': '02', 'PnD': '03',
'TnH': '14', 'TnM': '43', 'TnS': '59.9999997'},
datetime.timedelta(days=428, hours=14,
minutes=43, seconds=59,
microseconds=999999)),
#Verify overflows
({'TnH': '36'}, datetime.timedelta(days=1, hours=12)))
for testtuple in testtuples:
result = PythonTimeBuilder.build_duration(**testtuple[0])
self.assertEqual(result, testtuple[1])
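    #A minimal usage sketch based on the expectations above (the builder
    #evidently uses nominal conversions of 1 year == 365 days and
    #1 month == 30 days):
    #  PythonTimeBuilder.build_duration(PnY='1', PnM='2', PnD='3')
    #  #-> datetime.timedelta(days=428)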
def test_build_interval(self):
testtuples = (({'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration': (None, '1', None, None, None, None, None,
'duration')},
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=3, day=6,
hour=1, minute=1)),
({'end': ('1981', '04', '05', None, None, None, 'date'),
'duration': (None, '1', None, None, None, None, None,
'duration')},
datetime.date(year=1981, month=4, day=5),
datetime.date(year=1981, month=3, day=6)),
({'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2016, month=9, day=4,
hour=12)),
({'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=11,
hour=23)),
({'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '4', '54', '6.5',
'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=11,
hour=19, minute=5, second=53,
microsecond=500000)),
({'end': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')},
datetime.datetime(year=2050, month=3, day=1,
hour=13,
tzinfo=UTCOffset(name='UTC',
minutes=0)),
datetime.datetime(year=2050, month=3, day=1,
hour=3,
tzinfo=UTCOffset(name='UTC',
minutes=0))),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
#https://bitbucket.org/nielsenb/aniso8601/issues/21/sub-microsecond-precision-is-lost-when
({'end': ('2000', '01', '01',
None, None, None, 'date'),
'duration': ('1999.9999999999999999', None, None,
None, None, None,
None, 'duration')},
datetime.date(year=2000, month=1, day=1),
datetime.datetime(year=1, month=4, day=30,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, '1.9999999999999999', None,
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1988, month=12, day=31,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, '1.9999999999999999',
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=2, day=15,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, None,
'1.9999999999999999', None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=2, day=27,
hour=0, minute=0, second=0,
microsecond=1)),
({'end': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, '14.9999999999999999', None,
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2000, month=12, day=31,
hour=9, minute=0, second=0,
microsecond=1)),
({'end': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.00000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1)),
({'end': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.0000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2000, month=12, day=31,
hour=23, minute=59, second=59,
microsecond=999995)),
({'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=6)),
({'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=5,
hour=23, minute=59, second=57,
microsecond=999996)),
({'start': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'duration': (None, '1', None,
'1', None, '1', None, 'duration')},
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=5, day=6,
hour=1, minute=2)),
({'start': ('1981', '04', '05',
None, None, None, 'date'),
'duration': (None, '1', None,
'1', None, None, None, 'duration')},
datetime.date(year=1981, month=4, day=5),
datetime.date(year=1981, month=5, day=6)),
({'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.date(year=2018, month=5, day=20)),
({'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '1', None, None, 'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=12,
hour=1, minute=0)),
({'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '4', '54', '6.5', 'duration')},
datetime.date(year=2014, month=11, day=12),
datetime.datetime(year=2014, month=11, day=12,
hour=4, minute=54, second=6,
microsecond=500000)),
({'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')},
datetime.datetime(year=2050, month=3, day=1,
hour=13,
tzinfo=UTCOffset(name='UTC',
minutes=0)),
datetime.datetime(year=2050, month=3, day=1,
hour=23,
tzinfo=UTCOffset(name='UTC',
minutes=0))),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
({'start': ('0001', '01', '01',
None, None, None, 'date'),
'duration': ('1999.9999999999999999', None, None,
None, None, None,
None, 'duration')},
datetime.date(year=1, month=1, day=1),
datetime.datetime(year=1999, month=9, day=3,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, '1.9999999999999999', None,
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=4, day=29,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, '1.9999999999999999',
None, None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=3, day=14,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('1989', '03', '01',
None, None, None, 'date'),
'duration': (None, None, None,
'1.9999999999999999', None, None,
None, 'duration')},
datetime.date(year=1989, month=3, day=1),
datetime.datetime(year=1989, month=3, day=2,
hour=23, minute=59, second=59,
microsecond=999999)),
({'start': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, '14.9999999999999999', None,
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1,
hour=14, minute=59, second=59,
microsecond=999999)),
({'start': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.00000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1)),
({'start': ('2001', '01', '01',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, '0.0000000999',
None, 'duration')},
datetime.date(year=2001, month=1, day=1),
datetime.datetime(year=2001, month=1, day=1,
hour=0, minute=0, second=0,
microsecond=5)),
({'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=6)),
({'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')},
datetime.date(year=2018, month=3, day=6),
datetime.datetime(year=2018, month=3, day=6,
hour=0, minute=0, second=2,
microsecond=4)),
({'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')},
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1)),
({'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': ('1981', '04', '05',
None, None, None, 'date')},
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1),
datetime.date(year=1981, month=4, day=5)),
({'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')},
datetime.date(year=1980, month=3, day=5),
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1)),
({'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': ('1981', '04', '05',
None, None, None, 'date')},
datetime.date(year=1980, month=3, day=5),
datetime.date(year=1981, month=4, day=5)),
({'start': ('1981', '04', '05',
None, None, None, 'date'),
'end': ('1980', '03', '05',
None, None, None, 'date')},
datetime.date(year=1981, month=4, day=5),
datetime.date(year=1980, month=3, day=5)),
({'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'end': (('2050', '05', '11',
None, None, None, 'date'),
('15', '30', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime')},
datetime.datetime(year=2050, month=3, day=1,
hour=13,
tzinfo=UTCOffset(name='UTC',
minutes=0)),
datetime.datetime(year=2050, month=5, day=11,
hour=15, minute=30,
tzinfo=UTCOffset(name='UTC',
minutes=0))),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
({'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00.0000001',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('14', '43', '59.9999997', None, 'time'),
'datetime')},
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1),
datetime.datetime(year=1981, month=4, day=5,
hour=14, minute=43, second=59,
microsecond=999999)))
for testtuple in testtuples:
result = PythonTimeBuilder.build_interval(**testtuple[0])
self.assertEqual(result[0], testtuple[1])
self.assertEqual(result[1], testtuple[2])
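    #Note the assertion order above: for end/duration intervals the builder
    #returns the anchored end first and the computed start second, so the
    #results are not necessarily chronological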
def test_build_repeating_interval(self):
args = {'Rnn': '3', 'interval': (('1981', '04', '05',
None, None, None, 'date'),
None,
(None, None, None,
'1', None, None,
None, 'duration'),
'interval')}
results = list(PythonTimeBuilder.build_repeating_interval(**args))
self.assertEqual(results[0], datetime.date(year=1981, month=4, day=5))
self.assertEqual(results[1], datetime.date(year=1981, month=4, day=6))
self.assertEqual(results[2], datetime.date(year=1981, month=4, day=7))
args = {'Rnn': '11', 'interval': (None,
(('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
(None, None, None,
None, '1', '2',
None, 'duration'),
'interval')}
results = list(PythonTimeBuilder.build_repeating_interval(**args))
for dateindex in compat.range(0, 11):
self.assertEqual(results[dateindex],
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1)
- dateindex * datetime.timedelta(hours=1,
minutes=2))
args = {'Rnn': '2', 'interval': ((('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
(('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
None,
'interval')}
results = list(PythonTimeBuilder.build_repeating_interval(**args))
self.assertEqual(results[0],
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1))
self.assertEqual(results[1],
datetime.datetime(year=1981, month=4, day=5,
hour=1, minute=1))
args = {'R': True, 'interval': (None,
(('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
(None, None, None,
None, '1', '2', None, 'duration'),
'interval')}
resultgenerator = PythonTimeBuilder.build_repeating_interval(**args)
        #Test the first 11 generated datetimes
for dateindex in compat.range(0, 11):
self.assertEqual(next(resultgenerator),
datetime.datetime(year=1980, month=3, day=5,
hour=1, minute=1)
- dateindex * datetime.timedelta(hours=1,
minutes=2))
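        #With R=True (no explicit count) the builder evidently returns an
        #unbounded generator, so only a finite prefix is consumed via next()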
def test_build_timezone(self):
testtuples = (({'Z': True, 'name': 'Z'},
datetime.timedelta(hours=0), 'UTC'),
({'negative': False, 'hh': '00', 'mm': '00',
'name': '+00:00'},
datetime.timedelta(hours=0), '+00:00'),
({'negative': False, 'hh': '01', 'mm': '00',
'name': '+01:00'},
datetime.timedelta(hours=1), '+01:00'),
({'negative': True, 'hh': '01', 'mm': '00',
'name': '-01:00'},
-datetime.timedelta(hours=1), '-01:00'),
({'negative': False, 'hh': '00', 'mm': '12',
'name': '+00:12'},
datetime.timedelta(minutes=12), '+00:12'),
({'negative': False, 'hh': '01', 'mm': '23',
'name': '+01:23'},
datetime.timedelta(hours=1, minutes=23), '+01:23'),
({'negative': True, 'hh': '01', 'mm': '23',
'name': '-01:23'},
-datetime.timedelta(hours=1, minutes=23), '-01:23'),
({'negative': False, 'hh': '00',
'name': '+00'},
datetime.timedelta(hours=0), '+00'),
({'negative': False, 'hh': '01',
'name': '+01'},
datetime.timedelta(hours=1), '+01'),
({'negative': True, 'hh': '01',
'name': '-01'},
-datetime.timedelta(hours=1), '-01'),
({'negative': False, 'hh': '12',
'name': '+12'},
datetime.timedelta(hours=12), '+12'),
({'negative': True, 'hh': '12',
'name': '-12'},
-datetime.timedelta(hours=12), '-12'))
for testtuple in testtuples:
result = PythonTimeBuilder.build_timezone(**testtuple[0])
self.assertEqual(result.utcoffset(None), testtuple[1])
self.assertEqual(result.tzname(None), testtuple[2])
def test_build_week_date(self):
weekdate = PythonTimeBuilder._build_week_date(2009, 1)
self.assertEqual(weekdate, datetime.date(year=2008, month=12, day=29))
weekdate = PythonTimeBuilder._build_week_date(2009, 53, isoday=7)
self.assertEqual(weekdate, datetime.date(year=2010, month=1, day=3))
def test_build_ordinal_date(self):
ordinaldate = PythonTimeBuilder._build_ordinal_date(1981, 95)
self.assertEqual(ordinaldate, datetime.date(year=1981, month=4, day=5))
def test_build_ordinal_date_bounds_checking(self):
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder._build_ordinal_date(1234, 0)
with self.assertRaises(DayOutOfBoundsError):
PythonTimeBuilder._build_ordinal_date(1234, 367)
def test_iso_year_start(self):
yearstart = PythonTimeBuilder._iso_year_start(2004)
self.assertEqual(yearstart, datetime.date(year=2003, month=12, day=29))
yearstart = PythonTimeBuilder._iso_year_start(2010)
self.assertEqual(yearstart, datetime.date(year=2010, month=1, day=4))
yearstart = PythonTimeBuilder._iso_year_start(2009)
self.assertEqual(yearstart, datetime.date(year=2008, month=12, day=29))
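        #Per ISO 8601, week 01 is the week containing January 4th, so the
        #ISO year starts on that week's Monday - consistent with the dates
        #asserted above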
def test_date_generator(self):
startdate = datetime.date(year=2018, month=8, day=29)
timedelta = datetime.timedelta(days=1)
iterations = 10
generator = PythonTimeBuilder._date_generator(startdate,
timedelta,
iterations)
results = list(generator)
for dateindex in compat.range(0, 10):
self.assertEqual(results[dateindex],
datetime.date(year=2018, month=8, day=29)
+ dateindex * datetime.timedelta(days=1))
def test_date_generator_unbounded(self):
startdate = datetime.date(year=2018, month=8, day=29)
timedelta = datetime.timedelta(days=5)
generator = PythonTimeBuilder._date_generator_unbounded(startdate,
timedelta)
#Check the first 10 results
for dateindex in compat.range(0, 10):
self.assertEqual(next(generator),
datetime.date(year=2018, month=8, day=29)
+ dateindex * datetime.timedelta(days=5))
def test_split_to_microseconds(self):
result = PythonTimeBuilder._split_to_microseconds('1.1', int(1e6), 'dummy')
self.assertEqual(result, (1, 100000))
self.assertIsInstance(result[0], int)
self.assertIsInstance(result[1], int)
result = PythonTimeBuilder._split_to_microseconds('1.000001', int(1e6), 'dummy')
self.assertEqual(result, (1, 1))
self.assertIsInstance(result[0], int)
self.assertIsInstance(result[1], int)
result = PythonTimeBuilder._split_to_microseconds('1.0000001', int(1e6), 'dummy')
self.assertEqual(result, (1, 0))
self.assertIsInstance(result[0], int)
self.assertIsInstance(result[1], int)
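        #The cases above suggest the helper returns the integer part plus
        #the fractional part truncated (not rounded) to whole microseconds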
def test_split_to_microseconds_exception(self):
with self.assertRaises(ISOFormatError) as e:
PythonTimeBuilder._split_to_microseconds('b.1', int(1e6), 'exception text')
self.assertEqual(str(e.exception), 'exception text')
with self.assertRaises(ISOFormatError) as e:
PythonTimeBuilder._split_to_microseconds('1.ad', int(1e6), 'exception text')
self.assertEqual(str(e.exception), 'exception text')
def test_distribute_microseconds(self):
self.assertEqual(PythonTimeBuilder._distribute_microseconds(1, (), ()), (1,))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(11, (0,), (10,)), (1, 1))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(211, (0, 0), (100, 10)), (2, 1, 1))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(1, (), ()), (1,))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(11, (5,), (10,)), (6, 1))
self.assertEqual(PythonTimeBuilder._distribute_microseconds(211, (10, 5), (100, 10)), (12, 6, 1))
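        #From the cases above, _distribute_microseconds appears to carry
        #leftover microseconds up through the supplied values using the
        #given conversion factors, e.g. 211 with factors (100, 10) becomes
        #(2, 1, 1)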

qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/builders/tests/__init__.py

# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.

qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/builders/tests/test_init.py

# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.builders import BaseTimeBuilder, TupleBuilder
from aniso8601.exceptions import ISOFormatError
from aniso8601.tests.compat import mock
class TestBaseTimeBuilder(unittest.TestCase):
def test_build_date(self):
with self.assertRaises(NotImplementedError):
BaseTimeBuilder.build_date()
def test_build_time(self):
with self.assertRaises(NotImplementedError):
BaseTimeBuilder.build_time()
def test_build_datetime(self):
with self.assertRaises(NotImplementedError):
BaseTimeBuilder.build_datetime(None, None)
def test_build_duration(self):
with self.assertRaises(NotImplementedError):
BaseTimeBuilder.build_duration()
def test_build_interval(self):
with self.assertRaises(NotImplementedError):
BaseTimeBuilder.build_interval()
def test_build_repeating_interval(self):
with self.assertRaises(NotImplementedError):
BaseTimeBuilder.build_repeating_interval()
def test_build_timezone(self):
with self.assertRaises(NotImplementedError):
BaseTimeBuilder.build_timezone()
def test_cast(self):
self.assertEqual(BaseTimeBuilder.cast('1', int), 1)
self.assertEqual(BaseTimeBuilder.cast('-2', int), -2)
self.assertEqual(BaseTimeBuilder.cast('3', float), float(3))
self.assertEqual(BaseTimeBuilder.cast('-4', float), float(-4))
self.assertEqual(BaseTimeBuilder.cast('5.6', float), 5.6)
self.assertEqual(BaseTimeBuilder.cast('-7.8', float), -7.8)
def test_cast_exception(self):
with self.assertRaises(ISOFormatError):
BaseTimeBuilder.cast('asdf', int)
with self.assertRaises(ISOFormatError):
BaseTimeBuilder.cast('asdf', float)
def test_cast_caughtexception(self):
def tester(value):
raise RuntimeError
with self.assertRaises(ISOFormatError):
BaseTimeBuilder.cast('asdf', tester,
caughtexceptions=(RuntimeError,))
def test_cast_thrownexception(self):
with self.assertRaises(RuntimeError):
BaseTimeBuilder.cast('asdf', int,
thrownexception=RuntimeError)
def test_build_object(self):
datetest = (('1', '2', '3', '4', '5', '6', 'date'),
{'YYYY': '1', 'MM': '2', 'DD': '3',
'Www': '4', 'D': '5', 'DDD': '6'})
timetest = (('1', '2', '3',
(False, False, '4', '5', 'tz name', 'timezone'),
'time'),
{'hh': '1', 'mm': '2', 'ss': '3',
'tz': (False, False, '4', '5', 'tz name', 'timezone')})
datetimetest = ((('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9',
(True, False, '10', '11', 'tz name', 'timezone'),
'time'),
'datetime'),
(('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9',
(True, False, '10', '11', 'tz name', 'timezone'),
'time')))
durationtest = (('1', '2', '3', '4', '5', '6', '7', 'duration'),
{'PnY': '1', 'PnM': '2', 'PnW': '3', 'PnD': '4',
'TnH': '5', 'TnM': '6', 'TnS': '7'})
intervaltests = (((('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9', '10', '11', '12', 'date'),
None, 'interval'),
{'start': ('1', '2', '3', '4', '5', '6', 'date'),
'end': ('7', '8', '9', '10', '11', '12', 'date'),
'duration': None}),
((('1', '2', '3', '4', '5', '6', 'date'),
None,
('7', '8', '9', '10', '11', '12', '13', 'duration'),
'interval'),
{'start': ('1', '2', '3', '4', '5', '6', 'date'),
'end': None,
'duration': ('7', '8', '9', '10', '11', '12', '13',
'duration')}),
((None,
('1', '2', '3',
(True, False, '4', '5', 'tz name', 'timezone'),
'time'),
('6', '7', '8', '9', '10', '11', '12', 'duration'),
'interval'),
{'start': None,
'end': ('1', '2', '3',
(True, False, '4', '5', 'tz name',
'timezone'),
'time'),
'duration': ('6', '7', '8', '9', '10', '11', '12',
'duration')}))
repeatingintervaltests = (((True,
None,
(('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9', '10', '11', '12', 'date'),
None, 'interval'), 'repeatinginterval'),
{'R': True,
'Rnn': None,
'interval': (('1', '2', '3',
'4', '5', '6', 'date'),
('7', '8', '9',
'10', '11', '12', 'date'),
None, 'interval')}),
((False,
'1',
((('2', '3', '4', '5', '6', '7', 'date'),
('8', '9', '10', None, 'time'),
'datetime'),
(('11', '12', '13', '14', '15', '16',
'date'),
('17', '18', '19', None, 'time'),
'datetime'),
None, 'interval'), 'repeatinginterval'),
                                   {'R': False,
'Rnn': '1',
'interval': ((('2', '3', '4',
'5', '6', '7', 'date'),
('8', '9', '10', None,
'time'), 'datetime'),
(('11', '12', '13',
'14', '15', '16', 'date'),
('17', '18', '19', None,
'time'), 'datetime'),
None, 'interval')}))
timezonetest = ((False, False, '1', '2', '+01:02', 'timezone'),
{'negative': False, 'Z': False,
'hh': '1', 'mm': '2', 'name': '+01:02'})
with mock.patch.object(aniso8601.builders.BaseTimeBuilder,
'build_date') as mock_build:
mock_build.return_value = datetest[0]
result = BaseTimeBuilder._build_object(datetest[0])
self.assertEqual(result, datetest[0])
mock_build.assert_called_once_with(**datetest[1])
with mock.patch.object(aniso8601.builders.BaseTimeBuilder,
'build_time') as mock_build:
mock_build.return_value = timetest[0]
result = BaseTimeBuilder._build_object(timetest[0])
self.assertEqual(result, timetest[0])
mock_build.assert_called_once_with(**timetest[1])
with mock.patch.object(aniso8601.builders.BaseTimeBuilder,
'build_datetime') as mock_build:
mock_build.return_value = datetimetest[0]
result = BaseTimeBuilder._build_object(datetimetest[0])
self.assertEqual(result, datetimetest[0])
mock_build.assert_called_once_with(*datetimetest[1])
with mock.patch.object(aniso8601.builders.BaseTimeBuilder,
'build_duration') as mock_build:
mock_build.return_value = durationtest[0]
result = BaseTimeBuilder._build_object(durationtest[0])
self.assertEqual(result, durationtest[0])
mock_build.assert_called_once_with(**durationtest[1])
for intervaltest in intervaltests:
with mock.patch.object(aniso8601.builders.BaseTimeBuilder,
'build_interval') as mock_build:
mock_build.return_value = intervaltest[0]
result = BaseTimeBuilder._build_object(intervaltest[0])
self.assertEqual(result, intervaltest[0])
mock_build.assert_called_once_with(**intervaltest[1])
for repeatingintervaltest in repeatingintervaltests:
with mock.patch.object(aniso8601.builders.BaseTimeBuilder,
'build_repeating_interval') as mock_build:
mock_build.return_value = repeatingintervaltest[0]
result = BaseTimeBuilder._build_object(repeatingintervaltest[0])
self.assertEqual(result, repeatingintervaltest[0])
mock_build.assert_called_once_with(**repeatingintervaltest[1])
with mock.patch.object(aniso8601.builders.BaseTimeBuilder,
'build_timezone') as mock_build:
mock_build.return_value = timezonetest[0]
result = BaseTimeBuilder._build_object(timezonetest[0])
self.assertEqual(result, timezonetest[0])
mock_build.assert_called_once_with(**timezonetest[1])
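    #The patched calls above indicate _build_object dispatches on the
    #trailing type tag of each tuple ('date', 'time', 'datetime',
    #'duration', 'interval', 'repeatinginterval', 'timezone') and unpacks
    #the remaining fields into the matching build_* method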
class TestTupleBuilder(unittest.TestCase):
def test_build_date(self):
datetuple = TupleBuilder.build_date()
self.assertEqual(datetuple, (None, None, None,
None, None, None,
'date'))
datetuple = TupleBuilder.build_date(YYYY='1', MM='2', DD='3',
Www='4', D='5', DDD='6')
self.assertEqual(datetuple, ('1', '2', '3',
'4', '5', '6',
'date'))
def test_build_time(self):
testtuples = (({}, (None, None, None, None, 'time')),
({'hh': '1', 'mm': '2', 'ss': '3', 'tz': None},
('1', '2', '3', None, 'time')),
({'hh': '1', 'mm': '2', 'ss': '3', 'tz': (False, False,
'4', '5',
'tz name',
'timezone')},
('1', '2', '3', (False, False, '4', '5',
'tz name', 'timezone'),
'time')))
for testtuple in testtuples:
self.assertEqual(TupleBuilder.build_time(**testtuple[0]),
testtuple[1])
def test_build_datetime(self):
testtuples = (({'date': ('1', '2', '3', '4', '5', '6', 'date'),
'time': ('7', '8', '9', None, 'time')},
(('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9', None, 'time'),
'datetime')),
({'date': ('1', '2', '3', '4', '5', '6', 'date'),
'time': ('7', '8', '9',
(True, False, '10', '11', 'tz name',
'timezone'),
'time')},
(('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9',
(True, False, '10', '11', 'tz name',
'timezone'),
'time'), 'datetime')))
for testtuple in testtuples:
self.assertEqual(TupleBuilder.build_datetime(**testtuple[0]),
testtuple[1])
def test_build_duration(self):
testtuples = (({}, (None, None, None, None, None, None, None,
'duration')),
({'PnY': '1', 'PnM': '2', 'PnW': '3', 'PnD': '4',
'TnH': '5', 'TnM': '6', 'TnS': '7'},
('1', '2', '3', '4',
'5', '6', '7',
'duration')))
for testtuple in testtuples:
self.assertEqual(TupleBuilder.build_duration(**testtuple[0]),
testtuple[1])
def test_build_interval(self):
testtuples = (({}, (None, None, None, 'interval')),
({'start': ('1', '2', '3', '4', '5', '6', 'date'),
'end': ('7', '8', '9', '10', '11', '12', 'date')},
(('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9', '10', '11', '12', 'date'),
None, 'interval')),
({'start': ('1', '2', '3',
(True, False, '7', '8', 'tz name',
'timezone'),
'time'),
'end': ('4', '5', '6',
(False, False, '9', '10', 'tz name',
'timezone'),
'time')},
(('1', '2', '3',
(True, False, '7', '8', 'tz name',
'timezone'),
'time'),
('4', '5', '6',
(False, False, '9', '10', 'tz name',
'timezone'),
'time'),
None, 'interval')),
({'start': (('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9',
(True, False, '10', '11', 'tz name',
'timezone'),
'time'),
'datetime'),
'end': (('12', '13', '14', '15', '16', '17', 'date'),
('18', '19', '20',
(False, False, '21', '22', 'tz name',
'timezone'),
'time'),
'datetime')},
((('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9',
(True, False, '10', '11', 'tz name',
'timezone'),
'time'),
'datetime'),
(('12', '13', '14', '15', '16', '17', 'date'),
('18', '19', '20',
(False, False, '21', '22', 'tz name',
'timezone'),
'time'),
'datetime'),
None, 'interval')),
({'start': ('1', '2', '3', '4', '5', '6', 'date'),
'end': None,
'duration': ('7', '8', '9', '10', '11', '12', '13',
'duration')},
(('1', '2', '3', '4', '5', '6', 'date'),
None,
('7', '8', '9', '10', '11', '12', '13',
'duration'),
'interval')),
({'start': None,
'end': ('1', '2', '3',
(True, False, '4', '5', 'tz name',
'timezone'),
'time'),
'duration': ('6', '7', '8', '9', '10', '11', '12',
'duration')},
(None,
('1', '2', '3',
(True, False, '4', '5', 'tz name',
'timezone'),
'time'),
('6', '7', '8', '9', '10', '11', '12',
'duration'),
'interval')))
for testtuple in testtuples:
self.assertEqual(TupleBuilder.build_interval(**testtuple[0]),
testtuple[1])
def test_build_repeating_interval(self):
testtuples = (({}, (None, None, None, 'repeatinginterval')),
({'R': True,
                        'interval': (('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9', '10', '11', '12', 'date'),
None, 'interval')},
(True, None, (('1', '2', '3', '4', '5', '6', 'date'),
('7', '8', '9', '10', '11', '12', 'date'),
None, 'interval'),
'repeatinginterval')),
                      ({'R': False, 'Rnn': '1',
'interval': ((('2', '3', '4', '5', '6', '7',
'date'),
('8', '9', '10', None, 'time'),
'datetime'),
(('11', '12', '13', '14', '15', '16',
'date'),
('17', '18', '19', None, 'time'),
'datetime'),
None, 'interval')},
(False, '1',
((('2', '3', '4', '5', '6', '7',
'date'),
('8', '9', '10', None, 'time'),
'datetime'),
(('11', '12', '13', '14', '15', '16',
'date'),
('17', '18', '19', None, 'time'),
'datetime'),
None, 'interval'),
'repeatinginterval')))
for testtuple in testtuples:
result = TupleBuilder.build_repeating_interval(**testtuple[0])
self.assertEqual(result, testtuple[1])
def test_build_timezone(self):
testtuples = (({}, (None, None, None, None, '', 'timezone')),
({'negative': False, 'Z': True, 'name': 'UTC'},
(False, True, None, None, 'UTC', 'timezone')),
({'negative': False, 'Z': False, 'hh': '1', 'mm': '2',
'name': '+01:02'},
(False, False, '1', '2', '+01:02', 'timezone')),
({'negative': True, 'Z': False, 'hh': '1', 'mm': '2',
'name': '-01:02'},
(True, False, '1', '2', '-01:02', 'timezone')))
for testtuple in testtuples:
result = TupleBuilder.build_timezone(**testtuple[0])
self.assertEqual(result, testtuple[1])
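    #Taken together, these cases suggest TupleBuilder is an identity-style
    #builder: each build_* call re-emits its arguments as a tuple tagged
    #with the component type, which is presumably what allows intermediate
    #parse results to be re-dispatched via BaseTimeBuilder._build_object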

qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_utcoffset.py

# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import datetime
import pickle
import unittest
from aniso8601.utcoffset import UTCOffset
class TestUTCOffset(unittest.TestCase):
def test_pickle(self):
#Make sure timezone objects are pickleable
testutcoffset = UTCOffset(name='UTC', minutes=0)
utcoffsetpickle = pickle.dumps(testutcoffset)
resultutcoffset = pickle.loads(utcoffsetpickle)
self.assertEqual(resultutcoffset._name, testutcoffset._name)
self.assertEqual(resultutcoffset._utcdelta, testutcoffset._utcdelta)
def test_repr(self):
self.assertEqual(str(UTCOffset(minutes=0)), '+0:00:00 UTC')
self.assertEqual(str(UTCOffset(minutes=60)), '+1:00:00 UTC')
self.assertEqual(str(UTCOffset(minutes=-60)), '-1:00:00 UTC')
self.assertEqual(str(UTCOffset(minutes=12)), '+0:12:00 UTC')
self.assertEqual(str(UTCOffset(minutes=-12)), '-0:12:00 UTC')
self.assertEqual(str(UTCOffset(minutes=83)), '+1:23:00 UTC')
self.assertEqual(str(UTCOffset(minutes=-83)), '-1:23:00 UTC')
self.assertEqual(str(UTCOffset(minutes=1440)), '+1 day, 0:00:00 UTC')
self.assertEqual(str(UTCOffset(minutes=-1440)), '-1 day, 0:00:00 UTC')
self.assertEqual(str(UTCOffset(minutes=2967)), '+2 days, 1:27:00 UTC')
self.assertEqual(str(UTCOffset(minutes=-2967)), '-2 days, 1:27:00 UTC')
def test_dst(self):
tzinfoobject = UTCOffset(minutes=240)
#This would raise ISOFormatError or a TypeError if dst info is invalid
result = datetime.datetime.now(tzinfoobject)
#Hacky way to make sure the tzinfo is what we'd expect
self.assertEqual(result.tzinfo.utcoffset(None),
datetime.timedelta(hours=4))
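    #A minimal usage sketch, assuming only the constructor signature
    #exercised above ('UTC+4' is an illustrative name, not from the suite):
    #  tz = UTCOffset(name='UTC+4', minutes=240)
    #  datetime.datetime(2019, 6, 5, tzinfo=tz).utcoffset()
    #  #-> datetime.timedelta(hours=4)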

qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_duration.py

# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError, NegativeDurationError
from aniso8601.duration import (parse_duration, _parse_duration_prescribed,
_parse_duration_combined,
_parse_duration_prescribed_notime,
_parse_duration_prescribed_time,
_parse_duration_element,
_has_any_component, _component_order_correct)
from aniso8601.tests.compat import mock
class TestDurationParserFunctions(unittest.TestCase):
def test_parse_duration(self):
testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6'}),
('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3'}),
('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('PT4H54M6,5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6.5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('PT0.0000001S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': None, 'TnM': None,
'TnS': '0.0000001'}),
('PT2.0000048S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': None, 'TnM': None,
'TnS': '2.0000048'}),
('P1Y', {'PnY': '1', 'PnM': None,
'PnW': None, 'PnD': None}),
('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
'PnD': None}),
('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD': None}),
('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD': None}),
('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
'PnD': None}),
('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1'}),
('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}),
('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}),
('P0003-06-04T12:30:05', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05'}),
('P0003-06-04T12:30:05.5', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05.5'}),
('P0001-02-03T14:43:59.9999997', {'PnY': '0001',
'PnM': '02',
'PnD': '03',
'TnH': '14',
'TnM': '43',
'TnS':
'59.9999997'}))
for testtuple in testtuples:
with mock.patch.object(aniso8601.duration.PythonTimeBuilder,
'build_duration') as mockBuildDuration:
mockBuildDuration.return_value = testtuple[1]
result = parse_duration(testtuple[0])
self.assertEqual(result, testtuple[1])
mockBuildDuration.assert_called_once_with(**testtuple[1])
def test_parse_duration_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'PnY': '1', 'PnM': '2', 'PnD': '3',
'TnH': '4', 'TnM': '54', 'TnS': '6'}
mockBuilder.build_duration.return_value = expectedargs
result = parse_duration('P1Y2M3DT4H54M6S', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_duration.assert_called_once_with(**expectedargs)
def test_parse_duration_nop(self):
with self.assertRaises(ISOFormatError):
#Duration must start with a P
parse_duration('1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_weekcombination(self):
with self.assertRaises(ISOFormatError):
            #The week designator cannot be combined with other duration components
#https://bitbucket.org/nielsenb/aniso8601/issues/2/week-designators-should-not-be-combinable
parse_duration('P1Y2W', builder=None)
def test_parse_duration_negative(self):
with self.assertRaises(NegativeDurationError):
parse_duration('P-1Y', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-2M', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-3D', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-T4H', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-T54M', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-T6S', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-7W', builder=None)
with self.assertRaises(NegativeDurationError):
parse_duration('P-1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
parse_duration('P1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1D1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1H1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('1Y2M3D1SPT1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1Y2M3D2MT1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P2M3D1ST1Y1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1Y2M2MT3D1S', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('P1D1Y1M', builder=None)
with self.assertRaises(ISOFormatError):
parse_duration('PT1S1H', builder=None)
def test_parse_duration_prescribed(self):
testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6'}),
('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6,5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6.5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3'}),
('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y', {'PnY': '1', 'PnM': None,
'PnW': None, 'PnD': None}),
('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
'PnD': None}),
('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD': None}),
('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD': None}),
('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
'PnD': None}),
('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1'}),
('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}),
('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_prescribed(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_prescribed_negative(self):
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-T1H', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-T2M', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-T3S', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-4W', builder=None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed('P-1Y2M3DT4H54M6S', builder=None)
def test_parse_duration_prescribed_multiplefractions(self):
with self.assertRaises(ISOFormatError):
#Multiple fractions are not allowed
_parse_duration_prescribed('P1Y2M3DT4H5.1234M6.1234S', None)
def test_parse_duration_prescribed_middlefraction(self):
with self.assertRaises(ISOFormatError):
#Fraction only allowed on final component
_parse_duration_prescribed('P1Y2M3DT4H5.1234M6S', None)
def test_parse_duration_prescribed_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1Dasdfasdf', None)
def test_parse_duration_prescribed_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1D1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1H1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('1Y2M3D1SPT1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1Y2M3D2MT1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P2M3D1ST1Y1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1Y2M2MT3D1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('P1D1Y1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed('PT1S1H', None)
def test_parse_duration_prescribed_notime(self):
testtuples = (('P1Y2M3D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3'}),
('P1Y2M3,5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y2M3.5D', {'PnY': '1', 'PnM': '2',
'PnW': None, 'PnD': '3.5'}),
('P1Y', {'PnY': '1', 'PnM': None,
'PnW': None, 'PnD': None}),
('P1,5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1.5Y', {'PnY': '1.5', 'PnM': None, 'PnW': None,
'PnD': None}),
('P1M', {'PnY': None, 'PnM': '1', 'PnW': None,
'PnD': None}),
('P1,5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD': None}),
('P1.5M', {'PnY': None, 'PnM': '1.5', 'PnW': None,
                                 'PnD': None}),
('P1W', {'PnY': None, 'PnM': None, 'PnW': '1',
'PnD': None}),
('P1,5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1.5W', {'PnY': None, 'PnM': None, 'PnW': '1.5',
'PnD': None}),
('P1D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1'}),
('P1,5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}),
('P1.5D', {'PnY': None, 'PnM': None, 'PnW': None,
'PnD': '1.5'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_prescribed_notime(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_prescribed_notime_timepart(self):
#Ensure no time part is allowed
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1D1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1H1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1Y2M3D4H', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1Y2M3D4H5S', None)
def test_parse_duration_prescribed_notime_negative(self):
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-1Y', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-2M', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-3D', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-7W', None)
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_notime('P-1Y2M3D', None)
def test_parse_duration_prescribed_notime_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/8/durations-with-components-in-wrong-order
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1H1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_notime('P1D1Y1M', None)
def test_parse_duration_prescribed_time(self):
testtuples = (('P1Y2M3DT4H54M6S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6'}),
('P1Y2M3DT4H54M6,5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('P1Y2M3DT4H54M6.5S', {'PnY': '1', 'PnM': '2',
'PnD': '3', 'TnH': '4',
'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6,5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}),
('PT4H54M6.5S', {'PnY': None, 'PnM': None, 'PnD': None,
'TnH': '4', 'TnM': '54', 'TnS': '6.5'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_prescribed_time(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_prescribed_time_timeindate(self):
#Don't allow time components in date half
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D4HT54M6S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D6ST4H54M', None)
def test_parse_duration_prescribed_time_dateintime(self):
#Don't allow date components in time half
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P2M3DT1Y4H54M6S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2MT3D4H54M6S', None)
def test_parse_duration_prescribed_time_negative(self):
with self.assertRaises(NegativeDurationError):
_parse_duration_prescribed_time('P-1Y2M3DT4H54M6S', None)
def test_parse_duration_prescribed_time_outoforder(self):
#Ensure durations are required to be in the correct order
#https://bitbucket.org/nielsenb/aniso8601/issues/7/durations-with-time-components-before-t
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('1Y2M3D1SPT1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M3D2MT1S', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P2M3D1ST1Y1M', None)
with self.assertRaises(ISOFormatError):
_parse_duration_prescribed_time('P1Y2M2MT3D1S', None)
def test_parse_duration_combined(self):
testtuples = (('P0003-06-04T12:30:05', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05'}),
('P0003-06-04T12:30:05,5', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05.5'}),
('P0003-06-04T12:30:05.5', {'PnY': '0003', 'PnM': '06',
'PnD': '04', 'TnH': '12',
'TnM': '30', 'TnS': '05.5'}),
('P0001-02-03T14:43:59.9999997', {'PnY': '0001',
'PnM': '02',
'PnD': '03',
'TnH': '14',
'TnM': '43',
'TnS':
'59.9999997'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_duration.return_value = testtuple[1]
result = _parse_duration_combined(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_duration.assert_called_once_with(**testtuple[1])
def test_parse_duration_combined_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ISOFormatError):
_parse_duration_combined('P0003-06-04T12:30:05.5asdfasdf', None)
def test_parse_duration_element(self):
testtuples = (('P1Y2M3D', 'Y', '1'),
('P1Y2M3D', 'M', '2'),
('P1Y2M3D', 'D', '3'),
('PT4H54M6,5S', 'H', '4'),
('PT4H54M6,5S', 'M', '54'),
('PT4H54M6,5S', 'S', '6.5'),
('T4H5M6.1234S', 'H', '4'),
('T4H5M6.1234S', 'M', '5'),
('T4H5M6.1234S', 'S', '6.1234'))
for testtuple in testtuples:
self.assertEqual(_parse_duration_element(testtuple[0],
testtuple[1]),
testtuple[2])
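        #Note the ',' cases above: _parse_duration_element evidently
        #normalizes a comma decimal separator to '.' in the returned value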
def test_has_any_component(self):
self.assertTrue(_has_any_component('P1Y', ['Y', 'M']))
self.assertFalse(_has_any_component('P1Y', ['M', 'D']))
def test_component_order_correct(self):
self.assertTrue(_component_order_correct('P1Y1M1D',
['P', 'Y', 'M', 'D']))
self.assertTrue(_component_order_correct('P1Y1M',
['P', 'Y', 'M', 'D']))
self.assertFalse(_component_order_correct('P1D1Y1M',
['P', 'Y', 'M', 'D']))
self.assertFalse(_component_order_correct('PT1S1H',
['T', 'H', 'M', 'S']))
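    #_has_any_component and _component_order_correct look like the helpers
    #behind the out-of-order rejections tested earlier, e.g. 'P1D1Y1M' and
    #'PT1S1H' both raising ISOFormatError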

qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/compat.py

# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import sys
PY2 = sys.version_info[0] == 2
if PY2:
import mock
else:
from unittest import mock
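#Note: on Python 2 this relies on the external 'mock' package; from
#Python 3.3 onward unittest.mock ships with the standard library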

qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_time.py

# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.resolution import TimeResolution
from aniso8601.time import (get_time_resolution, parse_datetime, parse_time,
_parse_hour, _parse_minute_time,
_parse_second_time, _split_tz)
from aniso8601.tests.compat import mock
class TestTimeParserFunctions(unittest.TestCase):
def test_get_time_resolution(self):
self.assertEqual(get_time_resolution('01:23:45'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('24:00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28,512400'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('01:23'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('24:00'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01:23,4567'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01:23.4567'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('012345'), TimeResolution.Seconds)
self.assertEqual(get_time_resolution('240000'), TimeResolution.Seconds)
self.assertEqual(get_time_resolution('0123'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('2400'), TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('24'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('12,5'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('12.5'), TimeResolution.Hours)
self.assertEqual(get_time_resolution('232128.512400+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('0123.4567+00:00'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01.4567+00:00'),
TimeResolution.Hours)
self.assertEqual(get_time_resolution('01:23:45+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('24:00:00+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400+00:00'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('01:23+00:00'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('24:00+00:00'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('01:23.4567+00:00'),
TimeResolution.Minutes)
self.assertEqual(get_time_resolution('23:21:28.512400+11:15'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400-12:34'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('23:21:28.512400Z'),
TimeResolution.Seconds)
self.assertEqual(get_time_resolution('06:14:00.000123Z'),
TimeResolution.Seconds)
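        #Summarizing the cases above: the resolution appears to depend only
        #on how many hh/mm/ss components are present (with or without ':'
        #separators); fractions and timezone suffixes do not change it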
def test_parse_time(self):
testtuples = (('01:23:45', {'hh': '01', 'mm': '23',
'ss': '45', 'tz': None}),
('24:00:00', {'hh': '24', 'mm': '00',
'ss': '00', 'tz': None}),
('23:21:28,512400', {'hh': '23', 'mm': '21',
'ss': '28.512400', 'tz': None}),
('23:21:28.512400', {'hh': '23', 'mm': '21',
'ss': '28.512400', 'tz': None}),
('01:03:11.858714', {'hh': '01', 'mm': '03',
'ss': '11.858714', 'tz': None}),
('14:43:59.9999997', {'hh': '14', 'mm': '43',
'ss': '59.9999997', 'tz': None}),
('01:23', {'hh': '01', 'mm': '23', 'tz': None}),
('24:00', {'hh': '24', 'mm': '00', 'tz': None}),
('01:23,4567', {'hh': '01', 'mm': '23.4567',
'tz': None}),
('01:23.4567', {'hh': '01', 'mm': '23.4567',
'tz': None}),
('012345', {'hh': '01', 'mm': '23',
'ss': '45', 'tz': None}),
('240000', {'hh': '24', 'mm': '00',
'ss': '00', 'tz': None}),
('232128,512400', {'hh': '23', 'mm': '21',
'ss': '28.512400', 'tz': None}),
('232128.512400', {'hh': '23', 'mm': '21',
'ss': '28.512400', 'tz': None}),
('010311.858714', {'hh': '01', 'mm': '03',
'ss': '11.858714', 'tz': None}),
('144359.9999997', {'hh': '14', 'mm': '43',
'ss': '59.9999997', 'tz': None}),
('0123', {'hh': '01', 'mm': '23', 'tz': None}),
('2400', {'hh': '24', 'mm': '00', 'tz': None}),
('01', {'hh': '01', 'tz': None}),
                      ('24', {'hh': '24', 'tz': None}),
('12,5', {'hh': '12.5', 'tz': None}),
('12.5', {'hh': '12.5', 'tz': None}),
('232128,512400+00:00', {'hh': '23', 'mm': '21',
'ss': '28.512400',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('232128.512400+00:00', {'hh': '23', 'mm': '21',
'ss': '28.512400',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('0123,4567+00:00', {'hh': '01', 'mm': '23.4567',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('0123.4567+00:00', {'hh': '01', 'mm': '23.4567',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('01,4567+00:00', {'hh': '01.4567',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('01.4567+00:00', {'hh': '01.4567',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('01:23:45+00:00', {'hh': '01', 'mm': '23',
'ss': '45',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('24:00:00+00:00', {'hh': '24', 'mm': '00',
'ss': '00',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('23:21:28.512400+00:00', {'hh': '23', 'mm': '21',
'ss': '28.512400',
'tz': (False, None,
'00', '00',
'+00:00',
'timezone')}),
('01:23+00:00', {'hh': '01', 'mm': '23',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('24:00+00:00', {'hh': '24', 'mm': '00',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('01:23.4567+00:00', {'hh': '01', 'mm': '23.4567',
'tz': (False, None,
'00', '00',
'+00:00', 'timezone')}),
('23:21:28.512400+11:15', {'hh': '23', 'mm': '21',
'ss': '28.512400',
'tz': (False, None,
'11', '15',
'+11:15',
'timezone')}),
('23:21:28.512400-12:34', {'hh': '23', 'mm': '21',
'ss': '28.512400',
'tz': (True, None,
'12', '34',
'-12:34',
'timezone')}),
('23:21:28.512400Z', {'hh': '23', 'mm': '21',
'ss': '28.512400',
'tz': (False, True,
None, None,
'Z', 'timezone')}),
('06:14:00.000123Z', {'hh': '06', 'mm': '14',
'ss': '00.000123',
'tz': (False, True,
None, None,
'Z', 'timezone')}))
for testtuple in testtuples:
with mock.patch.object(aniso8601.time.PythonTimeBuilder,
'build_time') as mockBuildTime:
mockBuildTime.return_value = testtuple[1]
result = parse_time(testtuple[0])
self.assertEqual(result, testtuple[1])
mockBuildTime.assert_called_once_with(**testtuple[1])
def test_parse_time_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'hh': '01', 'mm': '23', 'ss': '45', 'tz': None}
mockBuilder.build_time.return_value = expectedargs
result = parse_time('01:23:45', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_time.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'hh': '23', 'mm': '21', 'ss': '28.512400',
'tz': (False, None, '00', '00', '+00:00', 'timezone')}
mockBuilder.build_time.return_value = expectedargs
result = parse_time('232128.512400+00:00', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_time.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'hh': '23', 'mm': '21', 'ss': '28.512400',
'tz': (False, None, '11', '15', '+11:15', 'timezone')}
mockBuilder.build_time.return_value = expectedargs
result = parse_time('23:21:28.512400+11:15', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_time.assert_called_once_with(**expectedargs)
def test_parse_datetime(self):
testtuples = (('2019-06-05T01:03:11,858714',
(('2019', '06', '05', None, None, None, 'date'),
('01', '03', '11.858714',
None, 'time'))),
('2019-06-05T01:03:11.858714',
(('2019', '06', '05', None, None, None, 'date'),
('01', '03', '11.858714',
None, 'time'))),
('1981-04-05T23:21:28.512400Z',
(('1981', '04', '05', None, None, None, 'date'),
('23', '21', '28.512400',
(False, True, None, None, 'Z', 'timezone'),
'time'))),
('1981095T23:21:28.512400-12:34',
(('1981', None, None, None, None, '095', 'date'),
('23', '21', '28.512400',
(True, None, '12', '34', '-12:34', 'timezone'),
'time'))),
('19810405T23:21:28+00',
(('1981', '04', '05', None, None, None, 'date'),
('23', '21', '28',
(False, None, '00', None, '+00', 'timezone'),
'time'))),
('19810405T23:21:28+00:00',
(('1981', '04', '05', None, None, None, 'date'),
('23', '21', '28',
(False, None, '00', '00', '+00:00', 'timezone'),
'time'))))
for testtuple in testtuples:
with mock.patch.object(aniso8601.time.PythonTimeBuilder,
'build_datetime') as mockBuildDateTime:
mockBuildDateTime.return_value = testtuple[1]
result = parse_datetime(testtuple[0])
self.assertEqual(result, testtuple[1])
mockBuildDateTime.assert_called_once_with(*testtuple[1])
def test_parse_datetime_spacedelimited(self):
expectedargs = (('2004', None, None, '53', '6', None, 'date'),
('23', '21', '28.512400',
(True, None, '12', '34', '-12:34', 'timezone'),
'time'))
with mock.patch.object(aniso8601.time.PythonTimeBuilder,
'build_datetime') as mockBuildDateTime:
mockBuildDateTime.return_value = expectedargs
result = parse_datetime('2004-W53-6 23:21:28.512400-12:34',
delimiter=' ')
self.assertEqual(result, expectedargs)
mockBuildDateTime.assert_called_once_with(*expectedargs)
def test_parse_datetime_commadelimited(self):
expectedargs = (('1981', '04', '05', None, None, None, 'date'),
('23', '21', '28.512400',
(False, True, None, None, 'Z', 'timezone'),
'time'))
with mock.patch.object(aniso8601.time.PythonTimeBuilder,
'build_datetime') as mockBuildDateTime:
mockBuildDateTime.return_value = expectedargs
result = parse_datetime('1981-04-05,23:21:28,512400Z',
delimiter=',')
self.assertEqual(result, expectedargs)
mockBuildDateTime.assert_called_once_with(*expectedargs)
def test_parse_datetime_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = (('1981', None, None, None, None, '095', 'date'),
('23', '21', '28.512400',
(True, None, '12', '34', '-12:34', 'timezone'),
'time'))
mockBuilder.build_datetime.return_value = expectedargs
result = parse_datetime('1981095T23:21:28.512400-12:34',
builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_datetime.assert_called_once_with(*expectedargs)
def test_parse_hour(self):
testtuples = (('01', None, {'hh': '01', 'tz': None}),
('24', None, {'tz': None}),
('01.4567', None, {'hh': '01.4567', 'tz': None}),
('12.5', None, {'hh': '12.5', 'tz': None}),
('08', (True, None, '12', '34', '-12:34', 'timezone'),
{'hh': '08', 'tz':
(True, None, '12', '34', '-12:34', 'timezone')}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_time.return_value = testtuple[2]
result = _parse_hour(testtuple[0], testtuple[1], mockBuilder)
self.assertEqual(result, testtuple[2])
mockBuilder.build_time.assert_called_once_with(**testtuple[2])
def test_parse_minute_time(self):
testtuples = (('01:23', None, {'hh': '01', 'mm': '23', 'tz': None}),
('24:00', None, {'hh': '24', 'mm': '00', 'tz': None}),
('01:23.4567', None, {'hh': '01', 'mm': '23.4567',
'tz': None}),
('0123', None, {'hh': '01', 'mm': '23', 'tz': None}),
('2400', None, {'hh': '24', 'mm': '00', 'tz': None}),
('0123.4567', None, {'hh': '01', 'mm': '23.4567',
'tz': None}),
('08:13', (True, None, '12', '34', '-12:34', 'timezone'),
{'hh': '08', 'mm': '13',
'tz': (True, None, '12', '34', '-12:34', 'timezone')}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_time.return_value = testtuple[2]
result = _parse_minute_time(testtuple[0], testtuple[1],
mockBuilder)
self.assertEqual(result, testtuple[2])
mockBuilder.build_time.assert_called_once_with(**testtuple[2])
def test_parse_second_time(self):
testtuples = (('01:23:45', None, {'hh': '01', 'mm': '23',
'ss': '45', 'tz': None}),
('24:00:00', None, {'hh': '24', 'mm': '00',
'ss': '00', 'tz': None}),
('23:21:28.512400', None, {'hh': '23', 'mm': '21',
'ss': '28.512400',
'tz': None}),
('14:43:59.9999997', None, {'hh': '14', 'mm': '43',
'ss': '59.9999997',
'tz': None}),
('012345', None, {'hh': '01', 'mm': '23',
'ss': '45', 'tz': None}),
('240000', None, {'hh': '24', 'mm': '00',
'ss': '00', 'tz': None}),
('232128.512400', None, {'hh': '23', 'mm': '21',
'ss': '28.512400', 'tz': None}),
('144359.9999997', None, {'hh': '14', 'mm': '43',
'ss': '59.9999997',
'tz': None}),
('08:22:21',
(True, None, '12', '34', '-12:34', 'timezone'),
{'hh': '08', 'mm': '22', 'ss': '21',
'tz': (True, None, '12', '34', '-12:34', 'timezone')}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_time.return_value = testtuple[2]
result = _parse_second_time(testtuple[0], testtuple[1],
mockBuilder)
self.assertEqual(result, testtuple[2])
mockBuilder.build_time.assert_called_once_with(**testtuple[2])
def test_split_tz(self):
self.assertEqual(_split_tz('01:23:45'), ('01:23:45', None))
self.assertEqual(_split_tz('24:00:00'), ('24:00:00', None))
self.assertEqual(_split_tz('23:21:28.512400'),
('23:21:28.512400', None))
self.assertEqual(_split_tz('01:23'), ('01:23', None))
self.assertEqual(_split_tz('24:00'), ('24:00', None))
self.assertEqual(_split_tz('01:23.4567'), ('01:23.4567', None))
self.assertEqual(_split_tz('012345'), ('012345', None))
self.assertEqual(_split_tz('240000'), ('240000', None))
self.assertEqual(_split_tz('0123'), ('0123', None))
self.assertEqual(_split_tz('2400'), ('2400', None))
self.assertEqual(_split_tz('01'), ('01', None))
self.assertEqual(_split_tz('24'), ('24', None))
self.assertEqual(_split_tz('12.5'), ('12.5', None))
self.assertEqual(_split_tz('232128.512400+00:00'),
('232128.512400', '+00:00'))
self.assertEqual(_split_tz('0123.4567+00:00'), ('0123.4567', '+00:00'))
self.assertEqual(_split_tz('01.4567+00:00'), ('01.4567', '+00:00'))
self.assertEqual(_split_tz('01:23:45+00:00'), ('01:23:45', '+00:00'))
self.assertEqual(_split_tz('24:00:00+00:00'), ('24:00:00', '+00:00'))
self.assertEqual(_split_tz('23:21:28.512400+00:00'),
('23:21:28.512400', '+00:00'))
self.assertEqual(_split_tz('01:23+00:00'), ('01:23', '+00:00'))
self.assertEqual(_split_tz('24:00+00:00'), ('24:00', '+00:00'))
self.assertEqual(_split_tz('01:23.4567+00:00'),
('01:23.4567', '+00:00'))
self.assertEqual(_split_tz('23:21:28.512400+11:15'),
('23:21:28.512400', '+11:15'))
self.assertEqual(_split_tz('23:21:28.512400-12:34'),
('23:21:28.512400', '-12:34'))
self.assertEqual(_split_tz('23:21:28.512400Z'),
('23:21:28.512400', 'Z'))
self.assertEqual(_split_tz('06:14:00.000123Z'),
('06:14:00.000123', 'Z'))
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_interval.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError
from aniso8601.interval import (_parse_interval, parse_interval,
parse_repeating_interval)
from aniso8601.tests.compat import mock
class TestIntervalParserFunctions(unittest.TestCase):
def test_parse_interval(self):
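        #Each tuple pairs an ISO 8601 interval string with the keyword
        #arguments the mocked build_interval is expected to receive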
testtuples = (('P1M/1981-04-05T01:01:00',
{'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1M/1981-04-05',
{'end': ('1981', '04', '05', None, None, None, 'date'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1,5Y/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')}),
('P1.5Y/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')}),
('PT1H/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')}),
('PT4H54M6.5S/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '4', '54', '6.5',
'duration')}),
('PT10H/2050-03-01T13:00:00Z',
{'end': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('PT0.0000001S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('PT2.0000048S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1981-04-05T01:01:00/P1M1DT1M',
{'start': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'duration': (None, '1', None,
'1', None, '1', None, 'duration')}),
('1981-04-05/P1M1D',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'duration': (None, '1', None,
'1', None, None, None, 'duration')}),
('2018-03-06/P2,5M',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')}),
('2018-03-06/P2.5M',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')}),
('2014-11-12/PT1H',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '1', None, None, 'duration')}),
('2014-11-12/PT4H54M6.5S',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '4', '54', '6.5', 'duration')}),
('2050-03-01T13:00:00Z/PT10H',
{'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'duration': (None, None, None,
None, '10', None, None, 'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('2018-03-06/PT0.0000001S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('2018-03-06/PT2.0000048S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1980-03-05T01:01:00/1981-04-05T01:01:00',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05T01:01:00/1981-04-05',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1980-03-05/1981-04-05T01:01:00',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05/1981-04-05',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1981-04-05/1980-03-05',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'end': ('1980', '03', '05',
None, None, None, 'date')}),
('2050-03-01T13:00:00Z/2050-05-11T15:30:00Z',
{'start': (('2050', '03', '01',
None, None, None, 'date'),
('13', '00', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime'),
'end': (('2050', '05', '11',
None, None, None, 'date'),
('15', '30', '00',
(False, True, None, None,
'Z', 'timezone'), 'time'), 'datetime')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('1980-03-05T01:01:00.0000001/'
'1981-04-05T14:43:59.9999997',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00.0000001',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('14', '43', '59.9999997', None, 'time'),
'datetime')}))
for testtuple in testtuples:
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_interval') as mockBuildInterval:
mockBuildInterval.return_value = testtuple[1]
result = parse_interval(testtuple[0])
self.assertEqual(result, testtuple[1])
mockBuildInterval.assert_called_once_with(**testtuple[1])
#Test different separators
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_interval') as mockBuildInterval:
expectedargs = {'start': (('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'end':(('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime')}
mockBuildInterval.return_value = expectedargs
result = parse_interval('1980-03-05T01:01:00--1981-04-05T01:01:00',
intervaldelimiter='--')
self.assertEqual(result, expectedargs)
mockBuildInterval.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_interval') as mockBuildInterval:
expectedargs = {'start': (('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'end':(('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime')}
mockBuildInterval.return_value = expectedargs
result = parse_interval('1980-03-05 01:01:00/1981-04-05 01:01:00',
datetimedelimiter=' ')
self.assertEqual(result, expectedargs)
mockBuildInterval.assert_called_once_with(**expectedargs)
def test_parse_interval_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration':(None, '1', None, None, None, None, None,
'duration')}
mockBuilder.build_interval.return_value = expectedargs
result = parse_interval('P1M/1981-04-05T01:01:00', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'start': ('2014', '11', '12', None, None, None,
'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')}
mockBuilder.build_interval.return_value = expectedargs
result = parse_interval('2014-11-12/PT1H', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'start': (('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'end': (('1981', '04', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime')}
mockBuilder.build_interval.return_value = expectedargs
result = parse_interval('1980-03-05T01:01:00/1981-04-05T01:01:00',
builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
def test_parse_interval_repeating(self):
#Parse interval can't parse repeating intervals
with self.assertRaises(ISOFormatError):
parse_interval('R3/1981-04-05/P1D')
with self.assertRaises(ISOFormatError):
parse_interval('R3/1981-04-05/P0003-06-04T12:30:05.5')
with self.assertRaises(ISOFormatError):
parse_interval('R/PT1H2M/1980-03-05T01:01:00')
def test_parse_interval_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ValueError):
parse_interval('2001/P1Dasdf', builder=None)
with self.assertRaises(ValueError):
parse_interval('P1Dasdf/2001', builder=None)
with self.assertRaises(ValueError):
parse_interval('2001/P0003-06-04T12:30:05.5asdfasdf', builder=None)
with self.assertRaises(ValueError):
parse_interval('P0003-06-04T12:30:05.5asdfasdf/2001', builder=None)
class TestRepeatingIntervalParserFunctions(unittest.TestCase):
def test_parse_repeating_interval(self):
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '3',
'interval': (('1981', '04', '05', None, None, None,
'date'),
None,
(None, None, None, '1', None, None,
None, 'duration'),
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R3/1981-04-05/P1D')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '11',
'interval': (None,
(('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R11/PT1H2M/1980-03-05T01:01:00')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '2',
'interval': ((('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(('1981', '04', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
None,
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R2--1980-03-05T01:01:00--'
'1981-04-05T01:01:00',
intervaldelimiter='--')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': False, 'Rnn': '2',
'interval': ((('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(('1981', '04', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
None,
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R2/'
'1980-03-05 01:01:00/'
'1981-04-05 01:01:00',
datetimedelimiter=' ')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
with mock.patch.object(aniso8601.interval.PythonTimeBuilder,
'build_repeating_interval') as mockBuilder:
expectedargs = {'R': True, 'Rnn': None,
'interval': (None,
(('1980', '03', '05', None, None,
None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.return_value = expectedargs
result = parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00')
self.assertEqual(result, expectedargs)
mockBuilder.assert_called_once_with(**expectedargs)
def test_parse_repeating_interval_mockbuilder(self):
mockBuilder = mock.Mock()
args = {'R': False, 'Rnn': '3',
'interval': (('1981', '04', '05', None, None, None,
'date'),
None,
(None, None, None, '1', None, None,
None, 'duration'),
'interval')}
mockBuilder.build_repeating_interval.return_value = args
result = parse_repeating_interval('R3/1981-04-05/P1D',
builder=mockBuilder)
self.assertEqual(result, args)
mockBuilder.build_repeating_interval.assert_called_once_with(**args)
mockBuilder = mock.Mock()
args = {'R': False, 'Rnn': '11',
'interval': (None,
(('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.build_repeating_interval.return_value = args
result = parse_repeating_interval('R11/PT1H2M/1980-03-05T01:01:00',
builder=mockBuilder)
self.assertEqual(result, args)
mockBuilder.build_repeating_interval.assert_called_once_with(**args)
mockBuilder = mock.Mock()
args = {'R': True, 'Rnn': None,
'interval': (None,
(('1980', '03', '05', None, None, None,
'date'),
('01', '01', '00', None, 'time'),
'datetime'),
(None, None, None, None, '1', '2',
None, 'duration'),
'interval')}
mockBuilder.build_repeating_interval.return_value = args
result = parse_repeating_interval('R/PT1H2M/1980-03-05T01:01:00',
builder=mockBuilder)
self.assertEqual(result, args)
mockBuilder.build_repeating_interval.assert_called_once_with(**args)
def test_parse_repeating_interval_suffixgarbage(self):
#Don't allow garbage after the duration
#https://bitbucket.org/nielsenb/aniso8601/issues/9/durations-with-trailing-garbage-are-parsed
with self.assertRaises(ISOFormatError):
parse_repeating_interval('R3/1981-04-05/P1Dasdf', builder=None)
with self.assertRaises(ISOFormatError):
parse_repeating_interval('R3/'
'1981-04-05/'
'P0003-06-04T12:30:05.5asdfasdf',
builder=None)
def test_parse_interval_internal(self):
#Test the internal _parse_interval function
testtuples = (('P1M/1981-04-05T01:01:00',
{'end': (('1981', '04', '05', None, None, None, 'date'),
('01', '01', '00', None, 'time'), 'datetime'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1M/1981-04-05',
{'end': ('1981', '04', '05', None, None, None, 'date'),
'duration': (None, '1', None, None, None, None, None,
'duration')}),
('P1,5Y/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')}),
('P1.5Y/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': ('1.5', None, None, None, None, None, None,
'duration')}),
('PT1H/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '1', None, None,
'duration')}),
('PT4H54M6.5S/2014-11-12',
{'end': ('2014', '11', '12', None, None, None, 'date'),
'duration': (None, None, None, None, '4', '54', '6.5',
'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('PT0.0000001S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('PT2.0000048S/2018-03-06',
{'end': ('2018', '03', '06', None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1981-04-05T01:01:00/P1M1DT1M',
{'start': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00', None, 'time'),
'datetime'),
'duration': (None, '1', None,
'1', None, '1', None, 'duration')}),
('1981-04-05/P1M1D',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'duration': (None, '1', None,
'1', None, None, None, 'duration')}),
('2018-03-06/P2,5M',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')}),
('2018-03-06/P2.5M',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, '2.5', None,
None, None, None, None, 'duration')}),
('2014-11-12/PT1H',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '1', None, None, 'duration')}),
('2014-11-12/PT4H54M6.5S',
{'start': ('2014', '11', '12',
None, None, None, 'date'),
'duration': (None, None, None,
None, '4', '54', '6.5', 'duration')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('2018-03-06/PT0.0000001S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'0.0000001', 'duration')}),
('2018-03-06/PT2.0000048S',
{'start': ('2018', '03', '06',
None, None, None, 'date'),
'duration': (None, None, None,
None, None, None,
'2.0000048', 'duration')}),
('1980-03-05T01:01:00/1981-04-05T01:01:00',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05T01:01:00/1981-04-05',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1980-03-05/1981-04-05T01:01:00',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('01', '01', '00',
None, 'time'), 'datetime')}),
('1980-03-05/1981-04-05',
{'start': ('1980', '03', '05',
None, None, None, 'date'),
'end': ('1981', '04', '05',
None, None, None, 'date')}),
('1981-04-05/1980-03-05',
{'start': ('1981', '04', '05',
None, None, None, 'date'),
'end': ('1980', '03', '05',
None, None, None, 'date')}),
#Make sure we truncate, not round
#https://bitbucket.org/nielsenb/aniso8601/issues/10/sub-microsecond-precision-in-durations-is
('1980-03-05T01:01:00.0000001/'
'1981-04-05T14:43:59.9999997',
{'start': (('1980', '03', '05',
None, None, None, 'date'),
('01', '01', '00.0000001',
None, 'time'), 'datetime'),
'end': (('1981', '04', '05',
None, None, None, 'date'),
('14', '43', '59.9999997', None, 'time'),
'datetime')}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_interval.return_value = testtuple[1]
result = _parse_interval(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_interval.assert_called_once_with(**testtuple[1])
#Test different separators
expectedargs = {'start': (('1980', '03', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime'),
'end': (('1981', '04', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime')}
mockBuilder = mock.Mock()
mockBuilder.build_interval.return_value = expectedargs
result = _parse_interval('1980-03-05T01:01:00--1981-04-05T01:01:00',
mockBuilder,
intervaldelimiter='--')
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
expectedargs = {'start': (('1980', '03', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime'),
'end': (('1981', '04', '05',
None, None, None,
'date'),
('01', '01', '00',
None, 'time'),
'datetime')}
mockBuilder = mock.Mock()
mockBuilder.build_interval.return_value = expectedargs
        result = _parse_interval('1980-03-05 01:01:00/1981-04-05 01:01:00',
                                 mockBuilder,
                                 datetimedelimiter=' ')
self.assertEqual(result, expectedargs)
mockBuilder.build_interval.assert_called_once_with(**expectedargs)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_decimalfraction.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
from aniso8601.decimalfraction import find_separator, normalize
class TestDecimalFractionFunctions(unittest.TestCase):
def test_find_separator(self):
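        #find_separator returns the index of the first decimal separator
        #('.' or ','), or -1 when the string contains neither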
self.assertEqual(find_separator(''), -1)
self.assertEqual(find_separator('1234'), -1)
self.assertEqual(find_separator('12.345'), 2)
self.assertEqual(find_separator('123,45'), 3)
def test_normalize(self):
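        #normalize replaces every ',' decimal separator with '.'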
self.assertEqual(normalize(''), '')
self.assertEqual(normalize('12.34'), '12.34')
self.assertEqual(normalize('123,45'), '123.45')
self.assertEqual(normalize('123,45,67'), '123.45.67')
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_timezone.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError
from aniso8601.timezone import parse_timezone
from aniso8601.tests.compat import mock
class TestTimezoneParserFunctions(unittest.TestCase):
def test_parse_timezone(self):
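        #Each tuple pairs a timezone string with the keyword arguments
        #the mocked build_timezone is expected to receive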
testtuples = (('Z', {'negative': False, 'Z': True, 'name': 'Z'}),
('+00:00', {'negative': False, 'hh': '00', 'mm': '00',
'name': '+00:00'}),
('+01:00', {'negative': False, 'hh': '01', 'mm': '00',
'name': '+01:00'}),
('-01:00', {'negative': True, 'hh': '01', 'mm': '00',
'name': '-01:00'}),
('+00:12', {'negative': False, 'hh': '00', 'mm': '12',
'name': '+00:12'}),
('+01:23', {'negative': False, 'hh': '01', 'mm': '23',
'name': '+01:23'}),
('-01:23', {'negative': True, 'hh': '01', 'mm': '23',
'name': '-01:23'}),
('+0000', {'negative': False, 'hh': '00', 'mm': '00',
'name': '+0000'}),
('+0100', {'negative': False, 'hh': '01', 'mm': '00',
'name': '+0100'}),
('-0100', {'negative': True, 'hh': '01', 'mm': '00',
'name': '-0100'}),
('+0012', {'negative': False, 'hh': '00', 'mm': '12',
'name': '+0012'}),
('+0123', {'negative': False, 'hh': '01', 'mm': '23',
'name': '+0123'}),
('-0123', {'negative': True, 'hh': '01', 'mm': '23',
'name': '-0123'}),
('+00', {'negative': False, 'hh': '00', 'mm': None,
'name': '+00'}),
('+01', {'negative': False, 'hh': '01', 'mm': None,
'name': '+01'}),
('-01', {'negative': True, 'hh': '01', 'mm': None,
'name': '-01'}),
('+12', {'negative': False, 'hh': '12', 'mm': None,
'name': '+12'}),
('-12', {'negative': True, 'hh': '12', 'mm': None,
'name': '-12'}))
for testtuple in testtuples:
with mock.patch.object(aniso8601.timezone.PythonTimeBuilder,
'build_timezone') as mockBuildTimezone:
mockBuildTimezone.return_value = testtuple[1]
result = parse_timezone(testtuple[0])
self.assertEqual(result, testtuple[1])
mockBuildTimezone.assert_called_once_with(**testtuple[1])
def test_parse_timezone_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'negative': False, 'Z': True, 'name': 'Z'}
mockBuilder.build_timezone.return_value = expectedargs
result = parse_timezone('Z', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_timezone.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'negative': False, 'hh': '00', 'mm': '00',
'name': '+00:00'}
mockBuilder.build_timezone.return_value = expectedargs
result = parse_timezone('+00:00', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_timezone.assert_called_once_with(**expectedargs)
mockBuilder = mock.Mock()
expectedargs = {'negative': True, 'hh': '01', 'mm': '23',
'name': '-01:23'}
mockBuilder.build_timezone.return_value = expectedargs
result = parse_timezone('-01:23', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_timezone.assert_called_once_with(**expectedargs)
def test_parse_timezone_badstr(self):
with self.assertRaises(ISOFormatError):
parse_timezone('Y', builder=None)
with self.assertRaises(ISOFormatError):
parse_timezone(' Z', builder=None)
with self.assertRaises(ISOFormatError):
parse_timezone('Z ', builder=None)
with self.assertRaises(ISOFormatError):
parse_timezone(' Z ', builder=None)
def test_parse_timezone_negativezero(self):
#A 0 offset cannot be negative
with self.assertRaises(ISOFormatError):
parse_timezone('-00:00', builder=None)
with self.assertRaises(ISOFormatError):
parse_timezone('-0000', builder=None)
with self.assertRaises(ISOFormatError):
parse_timezone('-00', builder=None)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_date.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
from aniso8601.exceptions import ISOFormatError
from aniso8601.date import (parse_date, _parse_year, _parse_calendar_day,
_parse_calendar_month, _parse_week_day,
_parse_week, _parse_ordinal_date,
get_date_resolution)
from aniso8601.resolution import DateResolution
from aniso8601.tests.compat import mock
class TestDateResolutionFunctions(unittest.TestCase):
def test_get_date_resolution_year(self):
self.assertEqual(get_date_resolution('2013'), DateResolution.Year)
self.assertEqual(get_date_resolution('0001'), DateResolution.Year)
self.assertEqual(get_date_resolution('19'), DateResolution.Year)
def test_get_date_resolution_month(self):
self.assertEqual(get_date_resolution('1981-04'), DateResolution.Month)
def test_get_date_resolution_week(self):
self.assertEqual(get_date_resolution('2004-W53'), DateResolution.Week)
self.assertEqual(get_date_resolution('2009-W01'), DateResolution.Week)
self.assertEqual(get_date_resolution('2004W53'), DateResolution.Week)
def test_get_date_resolution_year_weekday(self):
self.assertEqual(get_date_resolution('2004-W53-6'),
DateResolution.Weekday)
self.assertEqual(get_date_resolution('2004W536'),
DateResolution.Weekday)
def test_get_date_resolution_year_ordinal(self):
self.assertEqual(get_date_resolution('1981-095'),
DateResolution.Ordinal)
self.assertEqual(get_date_resolution('1981095'),
DateResolution.Ordinal)
class TestDateParserFunctions(unittest.TestCase):
def test_parse_date(self):
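        #Each tuple pairs an ISO 8601 date string with the keyword
        #arguments the mocked build_date is expected to receive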
testtuples = (('2013', {'YYYY': '2013'}),
('0001', {'YYYY': '0001'}),
('19', {'YYYY': '19'}),
('1981-04-05', {'YYYY': '1981', 'MM': '04', 'DD': '05'}),
('19810405', {'YYYY': '1981', 'MM': '04', 'DD': '05'}),
('1981-04', {'YYYY': '1981', 'MM': '04'}),
('2004-W53', {'YYYY': '2004', 'Www': '53'}),
('2009-W01', {'YYYY': '2009', 'Www': '01'}),
('2004-W53-6', {'YYYY': '2004', 'Www': '53', 'D': '6'}),
('2004W53', {'YYYY': '2004', 'Www': '53'}),
('2004W536', {'YYYY': '2004', 'Www': '53', 'D': '6'}),
('1981-095', {'YYYY': '1981', 'DDD': '095'}),
('1981095', {'YYYY': '1981', 'DDD': '095'}))
for testtuple in testtuples:
with mock.patch.object(aniso8601.date.PythonTimeBuilder,
'build_date') as mockBuildDate:
mockBuildDate.return_value = testtuple[1]
result = parse_date(testtuple[0])
self.assertEqual(result, testtuple[1])
mockBuildDate.assert_called_once_with(**testtuple[1])
def test_parse_date_mockbuilder(self):
mockBuilder = mock.Mock()
expectedargs = {'YYYY': '1981', 'MM': '04', 'DD':'05'}
mockBuilder.build_date.return_value = expectedargs
result = parse_date('1981-04-05', builder=mockBuilder)
self.assertEqual(result, expectedargs)
mockBuilder.build_date.assert_called_once_with(**expectedargs)
def test_parse_year(self):
testtuples = (('2013', {'YYYY': '2013'}),
('0001', {'YYYY': '0001'}),
('1', {'YYYY': '1'}),
('19', {'YYYY': '19'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_date.return_value = testtuple[1]
result = _parse_year(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_date.assert_called_once_with(**testtuple[1])
def test_parse_calendar_day(self):
testtuples = (('1981-04-05', {'YYYY': '1981', 'MM': '04', 'DD': '05'}),
('19810405', {'YYYY': '1981', 'MM': '04', 'DD': '05'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_date.return_value = testtuple[1]
result = _parse_calendar_day(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_date.assert_called_once_with(**testtuple[1])
def test_parse_calendar_month(self):
testtuples = (('1981-04', {'YYYY': '1981', 'MM': '04'}),)
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_date.return_value = testtuple[1]
result = _parse_calendar_month(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_date.assert_called_once_with(**testtuple[1])
def test_parse_calendar_month_nohyphen(self):
#Hyphen is required
with self.assertRaises(ISOFormatError):
_parse_calendar_month('198104', None)
def test_parse_week_day(self):
testtuples = (('2004-W53-6', {'YYYY': '2004', 'Www': '53', 'D': '6'}),
('2009-W01-1', {'YYYY': '2009', 'Www': '01', 'D': '1'}),
('2009-W53-7', {'YYYY': '2009', 'Www': '53', 'D': '7'}),
('2010-W01-1', {'YYYY': '2010', 'Www': '01', 'D': '1'}),
('2004W536', {'YYYY': '2004', 'Www': '53', 'D': '6'}),
('2009W011', {'YYYY': '2009', 'Www': '01', 'D': '1'}),
('2009W537', {'YYYY': '2009', 'Www': '53', 'D': '7'}),
('2010W011', {'YYYY': '2010', 'Www': '01', 'D': '1'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_date.return_value = testtuple[1]
result = _parse_week_day(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_date.assert_called_once_with(**testtuple[1])
def test_parse_week(self):
testtuples = (('2004-W53', {'YYYY': '2004', 'Www': '53'}),
('2009-W01', {'YYYY': '2009', 'Www': '01'}),
('2009-W53', {'YYYY': '2009', 'Www': '53'}),
('2010-W01', {'YYYY': '2010', 'Www': '01'}),
('2004W53', {'YYYY': '2004', 'Www': '53'}),
('2009W01', {'YYYY': '2009', 'Www': '01'}),
('2009W53', {'YYYY': '2009', 'Www': '53'}),
('2010W01', {'YYYY': '2010', 'Www': '01'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_date.return_value = testtuple[1]
result = _parse_week(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_date.assert_called_once_with(**testtuple[1])
def test_parse_ordinal_date(self):
testtuples = (('1981-095', {'YYYY': '1981', 'DDD': '095'}),
('1981095', {'YYYY': '1981', 'DDD': '095'}),
('1981365', {'YYYY': '1981', 'DDD': '365'}),
('1980366', {'YYYY': '1980', 'DDD': '366'}))
for testtuple in testtuples:
mockBuilder = mock.Mock()
mockBuilder.build_date.return_value = testtuple[1]
result = _parse_ordinal_date(testtuple[0], mockBuilder)
self.assertEqual(result, testtuple[1])
mockBuilder.build_date.assert_called_once_with(**testtuple[1])
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/aniso8601/tests/test_init.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Brandon Nielsen
# All rights reserved.
#
# This software may be modified and distributed under the terms
# of the BSD license. See the LICENSE file for details.
import unittest
import aniso8601
class TestInitFunctions(unittest.TestCase):
def test_import(self):
#Verify the function mappings
self.assertEqual(aniso8601.parse_datetime,
aniso8601.time.parse_datetime)
self.assertEqual(aniso8601.parse_time, aniso8601.time.parse_time)
self.assertEqual(aniso8601.get_time_resolution,
aniso8601.time.get_time_resolution)
self.assertEqual(aniso8601.parse_date, aniso8601.date.parse_date)
self.assertEqual(aniso8601.get_date_resolution,
aniso8601.date.get_date_resolution)
self.assertEqual(aniso8601.parse_duration,
aniso8601.duration.parse_duration)
self.assertEqual(aniso8601.parse_interval,
aniso8601.interval.parse_interval)
self.assertEqual(aniso8601.parse_repeating_interval,
aniso8601.interval.parse_repeating_interval)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/RECORD
|
../../Scripts/autopep8.exe,sha256=kqJlFiAiJRRn3gGS_10HUEzYF_tHR8IPIfvGM35_vsc,106409
__pycache__/autopep8.cpython-39.pyc,,
autopep8-1.5.6.dist-info/AUTHORS.rst,sha256=tiTPsbzGl9dtXCMEWXbWSV1zan1M-BoWtiixs46GIWk,2003
autopep8-1.5.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
autopep8-1.5.6.dist-info/LICENSE,sha256=jR0COOSFQ0QZFMqwdB1N4-Bwobg2f3h69fIJr7YLCWo,1181
autopep8-1.5.6.dist-info/METADATA,sha256=f5pn0aVSAUMaM6J_WhXm_T4ZaaN85eky1uVLmH3Y8-c,16661
autopep8-1.5.6.dist-info/RECORD,,
autopep8-1.5.6.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
autopep8-1.5.6.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
autopep8-1.5.6.dist-info/entry_points.txt,sha256=iHNa5_cSXw2ablVbRmfiFGMG1CNrpEPRCEjn3nspaJ8,44
autopep8-1.5.6.dist-info/top_level.txt,sha256=s2x-di3QBwGxr7kd5xErt2pom8dsFRdINbmwsOEgLfU,9
autopep8.py,sha256=wXlK4sGQ1RNCeh_JGdQnyQRJEtd9gqp_lzFbTYklHCI,153221
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/LICENSE
|
Copyright (C) 2010-2011 Hideo Hattori
Copyright (C) 2011-2013 Hideo Hattori, Steven Myint
Copyright (C) 2013-2016 Hideo Hattori, Steven Myint, Bill Wendling
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/WHEEL
|
Wheel-Version: 1.0
Generator: bdist_wheel (0.36.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/entry_points.txt
|
[console_scripts]
autopep8 = autopep8:main
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/AUTHORS.rst
|
Main contributors
-----------------
- Hideo Hattori (https://github.com/hhatto)
- Steven Myint (https://github.com/myint)
- Bill Wendling (https://github.com/gwelymernans)
Patches
-------
- Fraser Tweedale (https://github.com/frasertweedale)
- clach04 (https://github.com/clach04)
- Marc Abramowitz (https://github.com/msabramo)
- dellis23 (https://github.com/dellis23)
- Sam Vilain (https://github.com/samv)
- Florent Xicluna (https://github.com/florentx)
- Andras Tim (https://github.com/andras-tim)
- tomscytale (https://github.com/tomscytale)
- Filip Noetzel (https://github.com/peritus)
- Erik Bray (https://github.com/iguananaut)
- Christopher Medrela (https://github.com/chrismedrela)
- 小明 (https://github.com/dongweiming)
- Andy Hayden (https://github.com/hayd)
- Fabio Zadrozny (https://github.com/fabioz)
- Alex Chernetz (https://github.com/achernet)
- Marc Schlaich (https://github.com/schlamar)
- E. M. Bray (https://github.com/embray)
- Thomas Hisch (https://github.com/thisch)
- Florian Best (https://github.com/spaceone)
- Ian Clark (https://github.com/evenicoulddoit)
- Khairi Hafsham (https://github.com/khairihafsham)
- Neil Halelamien (https://github.com/neilsh)
- Hashem Nasarat (https://github.com/Hnasar)
- Hugo van Kemenade (https://github.com/hugovk)
- gmbnomis (https://github.com/gmbnomis)
- Samuel Lelièvre (https://github.com/slel)
- bigredengineer (https://github.com/bigredengineer)
- Kai Chen (https://github.com/kx-chen)
- Anthony Sottile (https://github.com/asottile)
- 秋葉 (https://github.com/Hanaasagi)
- Christian Clauss (https://github.com/cclauss)
- tobixx (https://github.com/tobixx)
- Bastien Gérard (https://github.com/bagerard)
- nicolasbonifas (https://github.com/nicolasbonifas)
- Andrii Yurchuk (https://github.com/Ch00k)
- José M. Guisado (https://github.com/pvxe)
- Dai Truong (https://github.com/NovaDev94)
- jnozsc (https://github.com/jnozsc)
- Edwin Shepherd (https://github.com/shardros)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/top_level.txt
|
autopep8
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/INSTALLER
|
pip
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/autopep8-1.5.6.dist-info/METADATA
|
Metadata-Version: 2.1
Name: autopep8
Version: 1.5.6
Summary: A tool that automatically formats Python code to conform to the PEP 8 style guide
Home-page: https://github.com/hhatto/autopep8
Author: Hideo Hattori
Author-email: hhatto.jp@gmail.com
License: Expat License
Keywords: automation,pep8,format,pycodestyle
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Console
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Topic :: Software Development :: Quality Assurance
Requires-Dist: pycodestyle (>=2.7.0)
Requires-Dist: toml
========
autopep8
========
.. image:: https://img.shields.io/pypi/v/autopep8.svg
:target: https://pypi.org/project/autopep8/
:alt: PyPI Version
.. image:: https://github.com/hhatto/autopep8/workflows/Python%20package/badge.svg
:target: https://github.com/hhatto/autopep8/actions
:alt: Build status
.. image:: https://codecov.io/gh/hhatto/autopep8/branch/master/graph/badge.svg
:target: https://codecov.io/gh/hhatto/autopep8
:alt: Code Coverage
autopep8 automatically formats Python code to conform to the `PEP 8`_ style
guide. It uses the pycodestyle_ utility to determine what parts of the code
need to be formatted. autopep8 is capable of fixing most of the formatting
issues_ that can be reported by pycodestyle.
.. _PEP 8: https://www.python.org/dev/peps/pep-0008/
.. _issues: https://pycodestyle.readthedocs.org/en/latest/intro.html#error-codes
.. contents::
Installation
============
From pip::
$ pip install --upgrade autopep8
Consider using the ``--user`` option_.
.. _option: https://pip.pypa.io/en/latest/user_guide/#user-installs
Requirements
============
autopep8 requires pycodestyle_.
.. _pycodestyle: https://github.com/PyCQA/pycodestyle
Usage
=====
To modify a file in place (with aggressive level 2)::
$ autopep8 --in-place --aggressive --aggressive <filename>
Before running autopep8.
.. code-block:: python
import math, sys;
def example1():
####This is a long comment. This should be wrapped to fit within 72 characters.
some_tuple=( 1,2, 3,'a' );
some_variable={'long':'Long code lines should be wrapped within 79 characters.',
'other':[math.pi, 100,200,300,9876543210,'This is a long string that goes on'],
'more':{'inner':'This whole logical line should be wrapped.',some_tuple:[1,
20,300,40000,500000000,60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return {'has_key() is deprecated':True}.has_key({'f':2}.has_key(''));
class Example3( object ):
def __init__ ( self, bar ):
#Comments should have a space after the hash.
if bar : bar+=1; bar=bar* bar ; return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)
After running autopep8.
.. code-block:: python
import math
import sys
def example1():
# This is a long comment. This should be wrapped to fit within 72
# characters.
some_tuple = (1, 2, 3, 'a')
some_variable = {
'long': 'Long code lines should be wrapped within 79 characters.',
'other': [
math.pi,
100,
200,
300,
9876543210,
'This is a long string that goes on'],
'more': {
'inner': 'This whole logical line should be wrapped.',
some_tuple: [
1,
20,
300,
40000,
500000000,
60000000000000000]}}
return (some_tuple, some_variable)
def example2(): return ('' in {'f': 2}) in {'has_key() is deprecated': True}
class Example3(object):
def __init__(self, bar):
# Comments should have a space after the hash.
if bar:
bar += 1
bar = bar * bar
return bar
else:
some_string = """
Indentation in multiline strings should not be touched.
Only actual code should be reindented.
"""
return (sys.path, some_string)
Options::
usage: autopep8 [-h] [--version] [-v] [-d] [-i] [--global-config filename]
[--ignore-local-config] [-r] [-j n] [-p n] [-a]
[--experimental] [--exclude globs] [--list-fixes]
[--ignore errors] [--select errors] [--max-line-length n]
[--line-range line line] [--hang-closing] [--exit-code]
[files [files ...]]
Automatically formats Python code to conform to the PEP 8 style guide.
positional arguments:
files files to format or '-' for standard in
optional arguments:
-h, --help show this help message and exit
--version show program's version number and exit
-v, --verbose print verbose messages; multiple -v result in more
verbose messages
-d, --diff print the diff for the fixed source
-i, --in-place make changes to files in place
--global-config filename
path to a global pep8 config file; if this file does
not exist then this is ignored (default:
~/.config/pep8)
--ignore-local-config
don't look for and apply local config files; if not
passed, defaults are updated with any config files in
the project's root directory
-r, --recursive run recursively over directories; must be used with
--in-place or --diff
-j n, --jobs n number of parallel jobs; match CPU count if value is
less than 1
-p n, --pep8-passes n
maximum number of additional pep8 passes (default:
infinite)
-a, --aggressive enable non-whitespace changes; multiple -a result in
more aggressive changes
--experimental enable experimental fixes
--exclude globs exclude file/directory names that match these comma-
separated globs
--list-fixes list codes for fixes; used by --ignore and --select
--ignore errors do not fix these errors/warnings (default:
E226,E24,W50,W690)
--select errors fix only these errors/warnings (e.g. E4,W)
--max-line-length n set maximum allowed line length (default: 79)
--line-range line line, --range line line
only fix errors found within this inclusive range of
line numbers (e.g. 1 99); line numbers are indexed at
1
--hang-closing hang-closing option passed to pycodestyle
--exit-code change to behavior of exit code. default behavior of
return value, 0 is no differences, 1 is error exit.
return 2 when add this option. 2 is exists
differences.
Features
========
autopep8 fixes the following issues_ reported by pycodestyle_::
E101 - Reindent all lines.
E11 - Fix indentation.
E121 - Fix indentation to be a multiple of four.
E122 - Add absent indentation for hanging indentation.
E123 - Align closing bracket to match opening bracket.
E124 - Align closing bracket to match visual indentation.
E125 - Indent to distinguish line from next logical line.
E126 - Fix over-indented hanging indentation.
E127 - Fix visual indentation.
E128 - Fix visual indentation.
E129 - Fix visual indentation.
E131 - Fix hanging indent for unaligned continuation line.
E133 - Fix missing indentation for closing bracket.
E20 - Remove extraneous whitespace.
E211 - Remove extraneous whitespace.
E22 - Fix extraneous whitespace around keywords.
E224 - Remove extraneous whitespace around operator.
E225 - Fix missing whitespace around operator.
E226 - Fix missing whitespace around arithmetic operator.
E227 - Fix missing whitespace around bitwise/shift operator.
E228 - Fix missing whitespace around modulo operator.
E231 - Add missing whitespace.
E241 - Fix extraneous whitespace around keywords.
E242 - Remove extraneous whitespace around operator.
E251 - Remove whitespace around parameter '=' sign.
E252 - Fix missing whitespace around parameter equals.
E26 - Fix spacing after comment hash for inline comments.
E265 - Fix spacing after comment hash for block comments.
E266 - Fix too many leading '#' for block comments.
E27 - Fix extraneous whitespace around keywords.
E301 - Add missing blank line.
E302 - Add missing 2 blank lines.
E303 - Remove extra blank lines.
E304 - Remove blank line following function decorator.
E305 - Expected 2 blank lines after end of function or class.
E306 - Expected 1 blank line before a nested definition.
E401 - Put imports on separate lines.
E402 - Fix module level import not at top of file.
E501 - Try to make lines fit within --max-line-length characters.
E502 - Remove extraneous escape of newline.
E701 - Put colon-separated compound statement on separate lines.
E70 - Put semicolon-separated compound statement on separate lines.
E711 - Fix comparison with None.
E712 - Fix comparison with boolean.
E713 - Use 'not in' for test for membership.
E714 - Use 'is not' test for object identity.
E721 - Use "isinstance()" instead of comparing types directly.
E722 - Fix bare except.
E731 - Use a def when you do not assign a lambda expression.
W291 - Remove trailing whitespace.
W292 - Add a single newline at the end of the file.
W293 - Remove trailing whitespace on blank line.
W391 - Remove trailing blank lines.
W503 - Fix line break before binary operator.
W504 - Fix line break after binary operator.
W601 - Use "in" rather than "has_key()".
W602 - Fix deprecated form of raising exception.
W603 - Use "!=" instead of "<>".
W604 - Use "repr()" instead of backticks.
W605 - Fix invalid escape sequence 'x'.
W690 - Fix various deprecated code (via lib2to3).
autopep8 also fixes some issues not found by pycodestyle_.
- Correct deprecated or non-idiomatic Python code (via ``lib2to3``). Use this
for making Python 2.7 code more compatible with Python 3. (This is triggered
if ``W690`` is enabled.)
- Normalize files with mixed line endings.
- Put a blank line between a class docstring and its first method
declaration. (Enabled with ``E301``.)
- Remove blank lines between a function declaration and its docstring. (Enabled
  with ``E303``; see the sketch after this list.)
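A rough sketch of that blank-line normalization through the module interface
(assuming ``select`` accepts a list of codes, by analogy with the ``ignore``
example under "Use as a module" below; the input snippet is hypothetical, not
from the original docs):
.. code-block:: python
    import autopep8
    # Per the bullets above, E301 should insert a blank line between the
    # class docstring and the first method, and E303 should drop extras.
    src = 'class A(object):\n    """Doc."""\n    def m(self):\n        pass\n'
    print(autopep8.fix_code(src, options={'select': ['E301', 'E303']}))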
autopep8 avoids fixing some issues found by pycodestyle_.
- ``E112``/``E113`` for non-comments are reports of bad indentation that break
syntax rules. These should not be modified at all.
- ``E265``, which refers to spacing after comment hash, is ignored if the
comment looks like code. autopep8 avoids modifying these since they are not
real comments. If you really want to get rid of the pycodestyle_ warning,
consider just removing the commented-out code. (This can be automated via
eradicate_.)
.. _eradicate: https://github.com/myint/eradicate
More advanced usage
===================
By default autopep8 only makes whitespace changes. Thus, by default, it does
not fix ``E711`` and ``E712``. (Changing ``x == None`` to ``x is None`` may
change the meaning of the program if ``x`` has its ``__eq__`` method
overridden.) Nor does it correct deprecated code ``W6``. To enable these
more aggressive fixes, use the ``--aggressive`` option::
$ autopep8 --aggressive <filename>
Use multiple ``--aggressive`` to increase the aggressiveness level. For
example, ``E712`` requires aggressiveness level 2 (since ``x == True`` could be
changed to either ``x`` or ``x is True``, but autopep8 chooses the former).
``--aggressive`` will also shorten lines more aggressively. It will also remove
trailing whitespace more aggressively. (Usually, we don't touch trailing
whitespace in docstrings and other multiline strings. And to do even more
aggressive changes to docstrings, use docformatter_.)
.. _docformatter: https://github.com/myint/docformatter
To enable only a subset of the fixes, use the ``--select`` option. For example,
to fix various types of indentation issues::
$ autopep8 --select=E1,W1 <filename>
Similarly, to just fix deprecated code::
$ autopep8 --aggressive --select=W6 <filename>
The above is useful when trying to port a single code base to work with both
Python 2 and Python 3 at the same time.
If the file being fixed is large, you may want to enable verbose progress
messages::
$ autopep8 -v <filename>
Passing in ``--experimental`` enables the following functionality:
- Shortens code lines by taking their length into account
::
$ autopep8 --experimental <filename>
Use as a module
===============
The simplest way of using autopep8 as a module is via the ``fix_code()``
function:
>>> import autopep8
>>> autopep8.fix_code('x= 123\n')
'x = 123\n'
Or with options:
>>> import autopep8
>>> autopep8.fix_code('x.has_key(y)\n',
... options={'aggressive': 1})
'y in x\n'
>>> autopep8.fix_code('print( 123 )\n',
... options={'ignore': ['E']})
'print( 123 )\n'
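One more minimal sketch (the ``max_line_length`` key mirrors the
command-line flag; the shown output is the default whitespace fix):
>>> autopep8.fix_code('x=1\n', options={'max_line_length': 79})
'x = 1\n'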
Configuration
=============
By default, if ``$HOME/.config/pycodestyle`` (``~\.pycodestyle`` on Windows)
exists, it will be used as the global configuration file.
Alternatively, you can specify the global configuration file with the
``--global-config`` option.
Also, if any of ``setup.cfg``, ``tox.ini``, ``.pep8`` or ``.flake8`` exists
in the directory of the target file, it will be used as the
configuration file.
``pep8``, ``pycodestyle``, and ``flake8`` can be used as the section name.
Configuration file example::
[pycodestyle]
max_line_length = 120
ignore = E501
pyproject.toml
--------------
autopep8 can also use ``pyproject.toml``.
The section must use ``[tool.autopep8]``, and ``pyproject.toml`` takes
precedence over any other configuration files.
Configuration file example::
[tool.autopep8]
max_line_length = 120
ignore = "E501,W6" # or ["E501", "W6"]
Testing
=======
Test cases are in ``test/test_autopep8.py``. They can be run directly via
``python test/test_autopep8.py`` or via tox_. The latter is useful for
testing against multiple Python interpreters. (We currently test against
CPython versions 2.7, 3.6, 3.7 and 3.8. We also test against PyPy.)
.. _`tox`: https://pypi.org/project/tox/
Broad spectrum testing is available via ``test/acid.py``. This script runs
autopep8 against Python code and checks for correctness and completeness of the
code fixes. It can check that the bytecode remains identical.
``test/acid_pypi.py`` makes use of ``acid.py`` to test against the latest
released packages on PyPI.
Troubleshooting
===============
``pkg_resources.DistributionNotFound``
--------------------------------------
If you are using an ancient version of ``setuptools``, you might encounter
``pkg_resources.DistributionNotFound`` when trying to run ``autopep8``. Try
upgrading ``setuptools`` to work around this ``setuptools`` problem::
$ pip install --upgrade setuptools
Use ``sudo`` if you are installing to the system.
Links
=====
* PyPI_
* GitHub_
* `Travis CI`_
* Coveralls_
.. _PyPI: https://pypi.org/project/autopep8/
.. _GitHub: https://github.com/hhatto/autopep8
.. _`Travis CI`: https://travis-ci.org/hhatto/autopep8
.. _`Coveralls`: https://coveralls.io/r/hhatto/autopep8
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/ssl_support.py
|
import os
import socket
import atexit
import re
import functools
from setuptools.extern.six.moves import urllib, http_client, map, filter
from pkg_resources import ResolutionError, ExtractionError
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
/usr/local/share/certs/ca-root-nss.crt
/etc/ssl/ca-bundle.pem
""".strip().split()
try:
HTTPSHandler = urllib.request.HTTPSHandler
HTTPSConnection = http_client.HTTPSConnection
except AttributeError:
HTTPSHandler = HTTPSConnection = object
is_available = ssl is not None and object not in (
HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
https://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a
# presented identifier in which the wildcard
# character comprises a label other than the
# left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
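# Illustrative behaviour (a sketch, not part of the upstream module):
# _dnsname_match('*.example.com', 'www.example.com') -> match object
# _dnsname_match('*.example.com', 'a.b.example.com') -> None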
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError(
"hostname %r doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError(
"hostname %r doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError(
"no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw),
req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
if hasattr(ssl, 'create_default_context'):
ctx = ssl.create_default_context(cafile=self.ca_bundle)
self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
else:
# Fallback for Python < 2.7.9 and < 3.4, which lack ssl.create_default_context()
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib.request.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
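# Hypothetical usage sketch (URL is illustrative only):
# urlopen = opener_for()  # falls back to find_ca_bundle()
# page = urlopen('https://pypi.org/simple/').read()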
# from jaraco.functools
def once(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(func, 'always_returns'):
func.always_returns = func(*args, **kwargs)
return func.always_returns
return wrapper
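# ``once`` caches the first return value, so get_win_certfile() below only
# probes the Windows certificate store a single time per process.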
@once
def get_win_certfile():
try:
import wincertstore
except ImportError:
return None
class CertFile(wincertstore.CertFile):
def __init__(self):
super(CertFile, self).__init__()
atexit.register(self.close)
def close(self):
try:
super(CertFile, self).close()
except OSError:
pass
_wincerts = CertFile()
_wincerts.addstore('CA')
_wincerts.addstore('ROOT')
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
extant_cert_paths = filter(os.path.isfile, cert_paths)
return (
get_win_certfile()
or next(extant_cert_paths, None)
or _certifi_where()
)
def _certifi_where():
try:
return __import__('certifi').where()
except (ImportError, ResolutionError, ExtractionError):
pass
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/windows_support.py
|
import platform
import ctypes
def windows_only(func):
if platform.system() != 'Windows':
return lambda *args, **kwargs: None
return func
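# On non-Windows platforms the decorated function becomes a harmless no-op,
# so callers never need their own platform checks.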
@windows_only
def hide_file(path):
"""
Set the hidden attribute on a file or directory.
From http://stackoverflow.com/questions/19622133/
`path` must be text.
"""
__import__('ctypes.wintypes')
SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
SetFileAttributes.restype = ctypes.wintypes.BOOL
FILE_ATTRIBUTE_HIDDEN = 0x02
ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
if not ret:
raise ctypes.WinError()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_deprecation_warning.py
|
class SetuptoolsDeprecationWarning(Warning):
"""
Base class for warning deprecations in ``setuptools``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/package_index.py
|
"""PyPI and direct package downloading"""
import sys
import os
import re
import shutil
import socket
import base64
import hashlib
import itertools
import warnings
from functools import wraps
from setuptools.extern import six
from setuptools.extern.six.moves import urllib, http_client, configparser, map
import setuptools
from pkg_resources import (
CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
Environment, find_distributions, safe_name, safe_version,
to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
)
from setuptools import ssl_support
from distutils import log
from distutils.errors import DistutilsError
from fnmatch import translate
from setuptools.py27compat import get_all_headers
from setuptools.py33compat import unescape
from setuptools.wheel import Wheel
__metaclass__ = type
EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
PYPI_MD5 = re.compile(
r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
r'href="[^?]+\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\)'
)
URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
__all__ = [
'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
'interpret_distro_name',
]
_SOCKET_TIMEOUT = 15
_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
user_agent = _tmpl.format(
py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools)
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError as e:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
) from e
def parse_bdist_wininst(name):
"""Return (base,pyversion) or (None,None) for possible .exe name"""
lower = name.lower()
base, py_ver, plat = None, None, None
if lower.endswith('.exe'):
if lower.endswith('.win32.exe'):
base = name[:-10]
plat = 'win32'
elif lower.startswith('.win32-py', -16):
py_ver = name[-7:-4]
base = name[:-16]
plat = 'win32'
elif lower.endswith('.win-amd64.exe'):
base = name[:-14]
plat = 'win-amd64'
elif lower.startswith('.win-amd64-py', -20):
py_ver = name[-7:-4]
base = name[:-20]
plat = 'win-amd64'
return base, py_ver, plat
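# Illustrative results (a sketch):
# parse_bdist_wininst('foo-1.0.win32.exe') -> ('foo-1.0', None, 'win32')
# parse_bdist_wininst('foo-1.0.win-amd64.exe') -> ('foo-1.0', None, 'win-amd64')
# parse_bdist_wininst('foo-1.0.tar.gz') -> (None, None, None)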
def egg_info_for_url(url):
parts = urllib.parse.urlparse(url)
scheme, server, path, parameters, query, fragment = parts
base = urllib.parse.unquote(path.split('/')[-1])
if server == 'sourceforge.net' and base == 'download': # XXX Yuck
base = urllib.parse.unquote(path.split('/')[-2])
if '#' in base:
base, fragment = base.split('#', 1)
return base, fragment
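# e.g. egg_info_for_url('https://host/pkg-1.0.tar.gz#md5=abc') (hypothetical
# URL) returns ('pkg-1.0.tar.gz', 'md5=abc').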
def distros_for_url(url, metadata=None):
"""Yield egg or source distribution objects that might be found at a URL"""
base, fragment = egg_info_for_url(url)
for dist in distros_for_location(url, base, metadata):
yield dist
if fragment:
match = EGG_FRAGMENT.match(fragment)
if match:
for dist in interpret_distro_name(
url, match.group(1), metadata, precedence=CHECKOUT_DIST
):
yield dist
def distros_for_location(location, basename, metadata=None):
"""Yield egg or source distribution objects based on basename"""
if basename.endswith('.egg.zip'):
basename = basename[:-4] # strip the .zip
if basename.endswith('.egg') and '-' in basename:
# only one, unambiguous interpretation
return [Distribution.from_location(location, basename, metadata)]
if basename.endswith('.whl') and '-' in basename:
wheel = Wheel(basename)
if not wheel.is_compatible():
return []
return [Distribution(
location=location,
project_name=wheel.project_name,
version=wheel.version,
# Increase priority over eggs.
precedence=EGG_DIST + 1,
)]
if basename.endswith('.exe'):
win_base, py_ver, platform = parse_bdist_wininst(basename)
if win_base is not None:
return interpret_distro_name(
location, win_base, metadata, py_ver, BINARY_DIST, platform
)
# Try source distro extensions (.zip, .tgz, etc.)
#
for ext in EXTENSIONS:
if basename.endswith(ext):
basename = basename[:-len(ext)]
return interpret_distro_name(location, basename, metadata)
return [] # no extension matched
def distros_for_filename(filename, metadata=None):
"""Yield possible egg or source distribution objects based on a filename"""
return distros_for_location(
normalize_path(filename), os.path.basename(filename), metadata
)
def interpret_distro_name(
location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
platform=None
):
"""Generate alternative interpretations of a source distro name
Note: if `location` is a filesystem filename, you should call
``pkg_resources.normalize_path()`` on it before passing it to this
routine!
"""
# Generate alternative interpretations of a source distro name
# Because some packages are ambiguous as to name/versions split
# e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
# So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
# "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
# the spurious interpretations should be ignored, because in the event
# there's also an "adns" package, the spurious "python-1.1.0" version will
# compare lower than any numeric version number, and is therefore unlikely
# to match a request for it. It's still a potential problem, though, and
# in the long run PyPI and the distutils should go for "safe" names and
# versions in distribution archive names (sdist and bdist).
parts = basename.split('-')
if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
# it is a bdist_dumb, not an sdist -- bail out
return
for p in range(1, len(parts) + 1):
yield Distribution(
location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
py_version=py_version, precedence=precedence,
platform=platform
)
# From Python 2.7 docs
def unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in six.moves.filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def unique_values(func):
"""
Wrap a function returning an iterable such that the resulting iterable
only ever yields unique items.
"""
@wraps(func)
def wrapper(*args, **kwargs):
return unique_everseen(func(*args, **kwargs))
return wrapper
REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
# this line is here to fix emacs' cruddy broken syntax highlighting
@unique_values
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos != -1:
match = HREF.search(page, pos)
if match:
yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
class ContentChecker:
"""
A null content checker that defines the interface for checking content
"""
def feed(self, block):
"""
Feed a block of data to the hash.
"""
return
def is_valid(self):
"""
Check the hash. Return False if validation fails.
"""
return True
def report(self, reporter, template):
"""
Call reporter with information about the checker (hash name)
substituted into the template.
"""
return
class HashChecker(ContentChecker):
pattern = re.compile(
r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
r'(?P<expected>[a-f0-9]+)'
)
def __init__(self, hash_name, expected):
self.hash_name = hash_name
self.hash = hashlib.new(hash_name)
self.expected = expected
@classmethod
def from_url(cls, url):
"Construct a (possibly null) ContentChecker from a URL"
fragment = urllib.parse.urlparse(url)[-1]
if not fragment:
return ContentChecker()
match = cls.pattern.search(fragment)
if not match:
return ContentChecker()
return cls(**match.groupdict())
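# e.g. a link ending in '#sha256=<64 hex digits>' (hypothetical) yields a
# HashChecker that validates the download against that digest.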
def feed(self, block):
self.hash.update(block)
def is_valid(self):
return self.hash.hexdigest() == self.expected
def report(self, reporter, template):
msg = template % self.hash_name
return reporter(msg)
class PackageIndex(Environment):
"""A distribution index that scans web pages for download URLs"""
def __init__(
self, index_url="https://pypi.org/simple/", hosts=('*',),
ca_bundle=None, verify_ssl=True, *args, **kw
):
Environment.__init__(self, *args, **kw)
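# Append a trailing slash only when one is missing: the boolean slice gives
# "/"[:1] == "/" when index_url lacks a slash and "/"[:0] == "" otherwise.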
self.index_url = index_url + "/" [:not index_url.endswith('/')]
self.scanned_urls = {}
self.fetched_urls = {}
self.package_pages = {}
self.allows = re.compile('|'.join(map(translate, hosts))).match
self.to_scan = []
use_ssl = (
verify_ssl
and ssl_support.is_available
and (ca_bundle or ssl_support.find_ca_bundle())
)
if use_ssl:
self.opener = ssl_support.opener_for(ca_bundle)
else:
self.opener = urllib.request.urlopen
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
tmpl = "Download error on %s: %%s -- Some packages may not be found!"
f = self.open_url(url, tmpl % url)
if f is None:
return
if isinstance(f, urllib.error.HTTPError) and f.code == 401:
self.info("Authentication error: %s" % f.msg)
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str):
# Running on Python 3, we got bytes but want str.
if isinstance(f, urllib.error.HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
page = self.process_index(url, page)
def process_filename(self, fn, nested=False):
# process filenames or directories
if not os.path.exists(fn):
self.warn("Not found: %s", fn)
return
if os.path.isdir(fn) and not nested:
path = os.path.realpath(fn)
for item in os.listdir(path):
self.process_filename(os.path.join(path, item), True)
dists = distros_for_filename(fn)
if dists:
self.debug("Found: %s", fn)
list(map(self.add, dists))
def url_ok(self, url, fatal=False):
s = URL_SCHEME(url)
is_file = s and s.group(1).lower() == 'file'
if is_file or self.allows(urllib.parse.urlparse(url)[1]):
return True
msg = (
"\nNote: Bypassing %s (disallowed host; see "
"http://bit.ly/2hrImnY for details).\n")
if fatal:
raise DistutilsError(msg % url)
else:
self.warn(msg, url)
def scan_egg_links(self, search_path):
dirs = filter(os.path.isdir, search_path)
egg_links = (
(path, entry)
for path in dirs
for entry in os.listdir(path)
if entry.endswith('.egg-link')
)
list(itertools.starmap(self.scan_egg_link, egg_links))
def scan_egg_link(self, path, entry):
with open(os.path.join(path, entry)) as raw_lines:
# filter non-empty lines
lines = list(filter(None, map(str.strip, raw_lines)))
if len(lines) != 2:
# format is not recognized; punt
return
egg_path, setup_path = lines
for dist in find_distributions(os.path.join(path, egg_path)):
dist.location = os.path.join(path, *lines)
dist.precedence = SOURCE_DIST
self.add(dist)
def process_index(self, url, page):
"""Process the contents of a PyPI page"""
def scan(link):
# Process a URL to see if it's for a package page
if link.startswith(self.index_url):
parts = list(map(
urllib.parse.unquote, link[len(self.index_url):].split('/')
))
if len(parts) == 2 and '#' not in parts[1]:
# it's a package page, sanitize and index it
pkg = safe_name(parts[0])
ver = safe_version(parts[1])
self.package_pages.setdefault(pkg.lower(), {})[link] = True
return to_filename(pkg), to_filename(ver)
return None, None
# process an index page into the package-page index
for match in HREF.finditer(page):
try:
scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
except ValueError:
pass
pkg, ver = scan(url) # ensure this page is in the page index
if pkg:
# process individual package page
for new_url in find_external_links(url, page):
# Process the found URL
base, frag = egg_info_for_url(new_url)
if base.endswith('.py') and not frag:
if ver:
new_url += '#egg=%s-%s' % (pkg, ver)
else:
self.need_version_info(url)
self.scan_url(new_url)
return PYPI_MD5.sub(
lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
)
else:
return "" # no sense double-scanning non-package pages
def need_version_info(self, url):
self.scan_all(
"Page at %s links to .py file(s) without version info; an index "
"scan is required.", url
)
def scan_all(self, msg=None, *args):
if self.index_url not in self.fetched_urls:
if msg:
self.warn(msg, *args)
self.info(
"Scanning index of all packages (this may take a while)"
)
self.scan_url(self.index_url)
def find_packages(self, requirement):
self.scan_url(self.index_url + requirement.unsafe_name + '/')
if not self.package_pages.get(requirement.key):
# Fall back to safe version of the name
self.scan_url(self.index_url + requirement.project_name + '/')
if not self.package_pages.get(requirement.key):
# We couldn't find the target package, so search the index page too
self.not_found_in_index(requirement)
for url in list(self.package_pages.get(requirement.key, ())):
# scan each page that might be related to the desired package
self.scan_url(url)
def obtain(self, requirement, installer=None):
self.prescan()
self.find_packages(requirement)
for dist in self[requirement.key]:
if dist in requirement:
return dist
self.debug("%s does not match %s", requirement, dist)
return super(PackageIndex, self).obtain(requirement, installer)
def check_hash(self, checker, filename, tfp):
"""
checker is a ContentChecker
"""
checker.report(
self.debug,
"Validating %%s checksum for %s" % filename)
if not checker.is_valid():
tfp.close()
os.unlink(filename)
raise DistutilsError(
"%s validation failed for %s; "
"possible download problem?"
% (checker.hash.name, os.path.basename(filename))
)
def add_find_links(self, urls):
"""Add `urls` to the list that will be prescanned for searches"""
for url in urls:
if (
self.to_scan is None # if we have already "gone online"
or not URL_SCHEME(url) # or it's a local file/directory
or url.startswith('file:')
or list(distros_for_url(url)) # or a direct package link
):
# then go ahead and process it now
self.scan_url(url)
else:
# otherwise, defer retrieval till later
self.to_scan.append(url)
def prescan(self):
"""Scan urls scheduled for prescanning (e.g. --find-links)"""
if self.to_scan:
list(map(self.scan_url, self.to_scan))
self.to_scan = None # from now on, go ahead and process immediately
def not_found_in_index(self, requirement):
if self[requirement.key]: # we've seen at least one distro
meth, msg = self.info, "Couldn't retrieve index page for %r"
else: # no distros seen for this name, might be misspelled
meth, msg = (
self.warn,
"Couldn't find index page for %r (maybe misspelled?)")
meth(msg, requirement.unsafe_name)
self.scan_all()
def download(self, spec, tmpdir):
"""Locate and/or download `spec` to `tmpdir`, returning a local path
`spec` may be a ``Requirement`` object, or a string containing a URL,
an existing local filename, or a project/version requirement spec
(i.e. the string form of a ``Requirement`` object). If it is the URL
of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
automatically created alongside the downloaded file.
If `spec` is a ``Requirement`` object or a string containing a
project/version requirement spec, this method returns the location of
a matching distribution (possibly after downloading it to `tmpdir`).
If `spec` is a locally existing file or directory name, it is simply
returned unchanged. If `spec` is a URL, it is downloaded to a subpath
of `tmpdir`, and the local filename is returned. Various errors may be
raised if a problem occurs during downloading.
"""
if not isinstance(spec, Requirement):
scheme = URL_SCHEME(spec)
if scheme:
# It's a url, download it to tmpdir
found = self._download_url(scheme.group(1), spec, tmpdir)
base, fragment = egg_info_for_url(spec)
if base.endswith('.py'):
found = self.gen_setup(found, fragment, tmpdir)
return found
elif os.path.exists(spec):
# Existing file or directory, just return it
return spec
else:
spec = parse_requirement_arg(spec)
return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
def fetch_distribution(
self, requirement, tmpdir, force_scan=False, source=False,
develop_ok=False, local_index=None):
"""Obtain a distribution suitable for fulfilling `requirement`
`requirement` must be a ``pkg_resources.Requirement`` instance.
If necessary, or if the `force_scan` flag is set, the requirement is
searched for in the (online) package index as well as the locally
installed packages. If a distribution matching `requirement` is found,
the returned distribution's ``location`` is the value you would have
gotten from calling the ``download()`` method with the matching
distribution's URL or filename. If no matching distribution is found,
``None`` is returned.
If the `source` flag is set, only source distributions and source
checkout links will be considered. Unless the `develop_ok` flag is
set, development and system eggs (i.e., those using the ``.egg-info``
format) will be ignored.
"""
# process a Requirement
self.info("Searching for %s", requirement)
skipped = {}
dist = None
def find(req, env=None):
if env is None:
env = self
# Find a matching distribution; may be called more than once
for dist in env[req.key]:
if dist.precedence == DEVELOP_DIST and not develop_ok:
if dist not in skipped:
self.warn(
"Skipping development or system egg: %s", dist,
)
skipped[dist] = 1
continue
test = (
dist in req
and (dist.precedence <= SOURCE_DIST or not source)
)
if test:
loc = self.download(dist.location, tmpdir)
dist.download_location = loc
if os.path.exists(dist.download_location):
return dist
if force_scan:
self.prescan()
self.find_packages(requirement)
dist = find(requirement)
if not dist and local_index is not None:
dist = find(requirement, local_index)
if dist is None:
if self.to_scan is not None:
self.prescan()
dist = find(requirement)
if dist is None and not force_scan:
self.find_packages(requirement)
dist = find(requirement)
if dist is None:
self.warn(
"No local packages or working download links found for %s%s",
(source and "a source distribution of " or ""),
requirement,
)
else:
self.info("Best match: %s", dist)
return dist.clone(location=dist.download_location)
def fetch(self, requirement, tmpdir, force_scan=False, source=False):
"""Obtain a file suitable for fulfilling `requirement`
DEPRECATED; use the ``fetch_distribution()`` method now instead. For
backward compatibility, this routine is identical but returns the
``location`` of the downloaded distribution instead of a distribution
object.
"""
dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
if dist is not None:
return dist.location
return None
def gen_setup(self, filename, fragment, tmpdir):
match = EGG_FRAGMENT.match(fragment)
dists = match and [
d for d in
interpret_distro_name(filename, match.group(1), None) if d.version
] or []
if len(dists) == 1: # unambiguous ``#egg`` fragment
basename = os.path.basename(filename)
# Make sure the file has been downloaded to the temp dir.
if os.path.dirname(filename) != tmpdir:
dst = os.path.join(tmpdir, basename)
from setuptools.command.easy_install import samefile
if not samefile(filename, dst):
shutil.copy2(filename, dst)
filename = dst
with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
file.write(
"from setuptools import setup\n"
"setup(name=%r, version=%r, py_modules=[%r])\n"
% (
dists[0].project_name, dists[0].version,
os.path.splitext(basename)[0]
)
)
return filename
elif match:
raise DistutilsError(
"Can't unambiguously interpret project/version identifier %r; "
"any dashes in the name or version should be escaped using "
"underscores. %r" % (fragment, dists)
)
else:
raise DistutilsError(
"Can't process plain .py files without an '#egg=name-version'"
" suffix to enable automatic setup script generation."
)
dl_blocksize = 8192
def _download_to(self, url, filename):
self.info("Downloading %s", url)
# Download the file
fp = None
try:
checker = HashChecker.from_url(url)
fp = self.open_url(url)
if isinstance(fp, urllib.error.HTTPError):
raise DistutilsError(
"Can't download %s: %s %s" % (url, fp.code, fp.msg)
)
headers = fp.info()
blocknum = 0
bs = self.dl_blocksize
size = -1
if "content-length" in headers:
# Some servers return multiple Content-Length headers :(
sizes = get_all_headers(headers, 'Content-Length')
size = max(map(int, sizes))
self.reporthook(url, filename, blocknum, bs, size)
with open(filename, 'wb') as tfp:
while True:
block = fp.read(bs)
if block:
checker.feed(block)
tfp.write(block)
blocknum += 1
self.reporthook(url, filename, blocknum, bs, size)
else:
break
self.check_hash(checker, filename, tfp)
return headers
finally:
if fp:
fp.close()
def reporthook(self, url, filename, blocknum, blksize, size):
pass # no-op
def open_url(self, url, warning=None):
if url.startswith('file:'):
return local_open(url)
try:
return open_with_auth(url, self.opener)
except (ValueError, http_client.InvalidURL) as v:
msg = ' '.join([str(arg) for arg in v.args])
if warning:
self.warn(warning, msg)
else:
raise DistutilsError('%s %s' % (url, msg)) from v
except urllib.error.HTTPError as v:
return v
except urllib.error.URLError as v:
if warning:
self.warn(warning, v.reason)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v.reason)) from v
except http_client.BadStatusLine as v:
if warning:
self.warn(warning, v.line)
else:
raise DistutilsError(
'%s returned a bad status line. The server might be '
'down, %s' %
(url, v.line)
) from v
except (http_client.HTTPException, socket.error) as v:
if warning:
self.warn(warning, v)
else:
raise DistutilsError("Download error for %s: %s"
% (url, v)) from v
def _download_url(self, scheme, url, tmpdir):
# Determine download filename
#
name, fragment = egg_info_for_url(url)
if name:
while '..' in name:
name = name.replace('..', '.').replace('\\', '_')
else:
name = "__downloaded__" # default if URL has no path contents
if name.endswith('.egg.zip'):
name = name[:-4] # strip the extra .zip before download
filename = os.path.join(tmpdir, name)
# Download the file
#
if scheme == 'svn' or scheme.startswith('svn+'):
return self._download_svn(url, filename)
elif scheme == 'git' or scheme.startswith('git+'):
return self._download_git(url, filename)
elif scheme.startswith('hg+'):
return self._download_hg(url, filename)
elif scheme == 'file':
return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
else:
self.url_ok(url, True) # raises error if not allowed
return self._attempt_download(url, filename)
def scan_url(self, url):
self.process_url(url, True)
def _attempt_download(self, url, filename):
headers = self._download_to(url, filename)
if 'html' in headers.get('content-type', '').lower():
return self._download_html(url, headers, filename)
else:
return filename
def _download_html(self, url, headers, filename):
file = open(filename)
for line in file:
if line.strip():
# Check for a subversion index page
if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
# it's a subversion index page:
file.close()
os.unlink(filename)
return self._download_svn(url, filename)
break # not an index page
file.close()
os.unlink(filename)
raise DistutilsError("Unexpected HTML page found at " + url)
def _download_svn(self, url, filename):
warnings.warn("SVN download support is deprecated", UserWarning)
url = url.split('#', 1)[0] # remove any fragment for svn's sake
creds = ''
if url.lower().startswith('svn:') and '@' in url:
scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
if not netloc and path.startswith('//') and '/' in path[2:]:
netloc, path = path[2:].split('/', 1)
auth, host = _splituser(netloc)
if auth:
if ':' in auth:
user, pw = auth.split(':', 1)
creds = " --username=%s --password=%s" % (user, pw)
else:
creds = " --username=" + auth
netloc = host
parts = scheme, netloc, url, p, q, f
url = urllib.parse.urlunparse(parts)
self.info("Doing subversion checkout from %s to %s", url, filename)
os.system("svn checkout%s -q %s %s" % (creds, url, filename))
return filename
@staticmethod
def _vcs_split_rev_from_url(url, pop_prefix=False):
scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
scheme = scheme.split('+', 1)[-1]
# Some fragment identification fails
path = path.split('#', 1)[0]
rev = None
if '@' in path:
path, rev = path.rsplit('@', 1)
# Also, discard fragment
url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
return url, rev
def _download_git(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing git clone from %s to %s", url, filename)
os.system("git clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Checking out %s", rev)
os.system("git -C %s checkout --quiet %s" % (
filename,
rev,
))
return filename
def _download_hg(self, url, filename):
filename = filename.split('#', 1)[0]
url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
self.info("Doing hg clone from %s to %s", url, filename)
os.system("hg clone --quiet %s %s" % (url, filename))
if rev is not None:
self.info("Updating to %s", rev)
os.system("hg --cwd %s up -C -r %s -q" % (
filename,
rev,
))
return filename
def debug(self, msg, *args):
log.debug(msg, *args)
def info(self, msg, *args):
log.info(msg, *args)
def warn(self, msg, *args):
log.warn(msg, *args)
# This pattern matches a character entity reference (a decimal numeric
# references, a hexadecimal numeric reference, or a named reference).
entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
def decode_entity(match):
what = match.group(0)
return unescape(what)
def htmldecode(text):
"""
Decode HTML entities in the given text.
>>> htmldecode(
... 'https://../package_name-0.1.2.tar.gz'
... '?tokena=A&tokenb=B">package_name-0.1.2.tar.gz')
'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
"""
return entity_sub(decode_entity, text)
def socket_timeout(timeout=15):
def _socket_timeout(func):
def _socket_timeout(*args, **kwargs):
old_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(timeout)
try:
return func(*args, **kwargs)
finally:
socket.setdefaulttimeout(old_timeout)
return _socket_timeout
return _socket_timeout
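# Used below to wrap open_with_auth so every request gets a bounded default
# socket timeout instead of potentially blocking forever.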
def _encode_auth(auth):
"""
A function compatible with Python 2.3-3.3 that will encode
auth from a URL suitable for an HTTP header.
>>> str(_encode_auth('username%3Apassword'))
'dXNlcm5hbWU6cGFzc3dvcmQ='
Long auth strings should not cause a newline to be inserted.
>>> long_auth = 'username:' + 'password'*10
>>> chr(10) in str(_encode_auth(long_auth))
False
"""
auth_s = urllib.parse.unquote(auth)
# convert to bytes
auth_bytes = auth_s.encode()
encoded_bytes = base64.b64encode(auth_bytes)
# convert back to a string
encoded = encoded_bytes.decode()
# strip the trailing carriage return
return encoded.replace('\n', '')
class Credential:
"""
A username/password pair. Use like a namedtuple.
"""
def __init__(self, username, password):
self.username = username
self.password = password
def __iter__(self):
yield self.username
yield self.password
def __str__(self):
return '%(username)s:%(password)s' % vars(self)
class PyPIConfig(configparser.RawConfigParser):
def __init__(self):
"""
Load from ~/.pypirc
"""
defaults = dict.fromkeys(['username', 'password', 'repository'], '')
configparser.RawConfigParser.__init__(self, defaults)
rc = os.path.join(os.path.expanduser('~'), '.pypirc')
if os.path.exists(rc):
self.read(rc)
@property
def creds_by_repository(self):
sections_with_repositories = [
section for section in self.sections()
if self.get(section, 'repository').strip()
]
return dict(map(self._get_repo_cred, sections_with_repositories))
def _get_repo_cred(self, section):
repo = self.get(section, 'repository').strip()
return repo, Credential(
self.get(section, 'username').strip(),
self.get(section, 'password').strip(),
)
def find_credential(self, url):
"""
If the URL indicated appears to be a repository defined in this
config, return the credential for that repository.
"""
for repository, cred in self.creds_by_repository.items():
if url.startswith(repository):
return cred
def open_with_auth(url, opener=urllib.request.urlopen):
"""Open a urllib2 request, handling HTTP authentication"""
parsed = urllib.parse.urlparse(url)
scheme, netloc, path, params, query, frag = parsed
# Double scheme does not raise on macOS as revealed by a
# failing test. We would expect "nonnumeric port". Refs #20.
if netloc.endswith(':'):
raise http_client.InvalidURL("nonnumeric port: ''")
if scheme in ('http', 'https'):
auth, address = _splituser(netloc)
else:
auth = None
if not auth:
cred = PyPIConfig().find_credential(url)
if cred:
auth = str(cred)
info = cred.username, url
log.info('Authenticating as %s for %s (from .pypirc)', *info)
if auth:
auth = "Basic " + _encode_auth(auth)
parts = scheme, address, path, params, query, frag
new_url = urllib.parse.urlunparse(parts)
request = urllib.request.Request(new_url)
request.add_header("Authorization", auth)
else:
request = urllib.request.Request(url)
request.add_header('User-Agent', user_agent)
fp = opener(request)
if auth:
# Put authentication info back into request URL if same host,
# so that links found on the page will work
s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
if s2 == scheme and h2 == address:
parts = s2, netloc, path2, param2, query2, frag2
fp.url = urllib.parse.urlunparse(parts)
return fp
# copy of urllib.parse._splituser from Python 3.8
def _splituser(host):
"""splituser('user[:passwd]@host[:port]')
--> 'user[:passwd]', 'host[:port]'."""
user, delim, host = host.rpartition('@')
return (user if delim else None), host
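# e.g. _splituser('alice:secret@host:8080') -> ('alice:secret', 'host:8080')
# and _splituser('host') -> (None, 'host').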
# adding a timeout to avoid freezing package_index
open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
def fix_sf_url(url):
return url # backward compatibility
def local_open(url):
"""Read a local path, with special support for directories"""
scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
filename = urllib.request.url2pathname(path)
if os.path.isfile(filename):
return urllib.request.urlopen(url)
elif path.endswith('/') and os.path.isdir(filename):
files = []
for f in os.listdir(filename):
filepath = os.path.join(filename, f)
if f == 'index.html':
with open(filepath, 'r') as fp:
body = fp.read()
break
elif os.path.isdir(filepath):
f += '/'
files.append('<a href="{name}">{name}</a>'.format(name=f))
else:
tmpl = (
"<html><head><title>{url}</title>"
"</head><body>{files}</body></html>")
body = tmpl.format(url=url, files='\n'.join(files))
status, message = 200, "OK"
else:
status, message, body = 404, "Path not found", "Not found"
headers = {'content-type': 'text/html'}
body_stream = six.StringIO(body)
return urllib.error.HTTPError(url, status, message, headers, body_stream)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/archive_util.py
|
"""Utilities for extracting common archive formats"""
import zipfile
import tarfile
import os
import shutil
import posixpath
import contextlib
from distutils.errors import DistutilsError
from pkg_resources import ensure_directory
__all__ = [
"unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
"UnrecognizedFormat", "extraction_drivers", "unpack_directory",
]
class UnrecognizedFormat(DistutilsError):
"""Couldn't recognize the archive type"""
def default_filter(src, dst):
"""The default progress/filter callback; returns True for all files"""
return dst
def unpack_archive(
filename, extract_dir, progress_filter=default_filter,
drivers=None):
"""Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
`progress_filter` is a function taking two arguments: a source path
internal to the archive ('/'-separated), and a filesystem path where it
will be extracted. The callback must return the desired extract path
(which may be the same as the one passed in), or else ``None`` to skip
that file or directory. The callback can thus be used to report on the
progress of the extraction, as well as to filter the items extracted or
alter their extraction paths.
`drivers`, if supplied, must be a non-empty sequence of functions with the
same signature as this function (minus the `drivers` argument), that raise
``UnrecognizedFormat`` if they do not support extracting the designated
archive type. The `drivers` are tried in sequence until one is found that
does not raise an error, or until all are exhausted (in which case
``UnrecognizedFormat`` is raised). If you do not supply a sequence of
drivers, the module's ``extraction_drivers`` constant will be used, which
means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
order.
"""
for driver in drivers or extraction_drivers:
try:
driver(filename, extract_dir, progress_filter)
except UnrecognizedFormat:
continue
else:
return
else:
raise UnrecognizedFormat(
"Not a recognized archive type: %s" % filename
)
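# Hypothetical usage sketch: extract an sdist while skipping its docs/ tree.
# def skip_docs(src, dst):
#     return None if src.startswith('docs/') else dst
# unpack_archive('pkg-1.0.tar.gz', 'build/pkg', progress_filter=skip_docs)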
def unpack_directory(filename, extract_dir, progress_filter=default_filter):
""""Unpack" a directory, using the same interface as for archives
Raises ``UnrecognizedFormat`` if `filename` is not a directory
"""
if not os.path.isdir(filename):
raise UnrecognizedFormat("%s is not a directory" % filename)
paths = {
filename: ('', extract_dir),
}
for base, dirs, files in os.walk(filename):
src, dst = paths[base]
for d in dirs:
paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
for f in files:
target = os.path.join(dst, f)
target = progress_filter(src + f, target)
if not target:
# skip non-files
continue
ensure_directory(target)
f = os.path.join(base, f)
shutil.copyfile(f, target)
shutil.copystat(f, target)
def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack zip `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
if not zipfile.is_zipfile(filename):
raise UnrecognizedFormat("%s is not a zip file" % (filename,))
with zipfile.ZipFile(filename) as z:
for info in z.infolist():
name = info.filename
# don't extract absolute paths or ones with .. in them
if name.startswith('/') or '..' in name.split('/'):
continue
target = os.path.join(extract_dir, *name.split('/'))
target = progress_filter(name, target)
if not target:
continue
if name.endswith('/'):
# directory
ensure_directory(target)
else:
# file
ensure_directory(target)
data = z.read(info.filename)
with open(target, 'wb') as f:
f.write(data)
unix_attributes = info.external_attr >> 16
if unix_attributes:
os.chmod(target, unix_attributes)
def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
"""Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
of the `progress_filter` argument.
"""
try:
tarobj = tarfile.open(filename)
except tarfile.TarError as e:
raise UnrecognizedFormat(
"%s is not a compressed or uncompressed tar file" % (filename,)
) from e
with contextlib.closing(tarobj):
# don't do any chowning!
tarobj.chown = lambda *args: None
for member in tarobj:
name = member.name
# don't extract absolute paths or ones with .. in them
if not name.startswith('/') and '..' not in name.split('/'):
prelim_dst = os.path.join(extract_dir, *name.split('/'))
# resolve any links and extract the link targets as normal
# files
while member is not None and (
member.islnk() or member.issym()):
linkpath = member.linkname
if member.issym():
base = posixpath.dirname(member.name)
linkpath = posixpath.join(base, linkpath)
linkpath = posixpath.normpath(linkpath)
member = tarobj._getmember(linkpath)
if member is not None and (member.isfile() or member.isdir()):
final_dst = progress_filter(name, prelim_dst)
if final_dst:
if final_dst.endswith(os.sep):
final_dst = final_dst[:-1]
try:
# XXX Ugh
tarobj._extract_member(member, final_dst)
except tarfile.ExtractError:
# chown/chmod/mkfifo/mknode/makedev failed
pass
return True
extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_imp.py
|
"""
Re-implementation of find_module and get_frozen_object
from the deprecated imp module.
"""
import os
import importlib.util
import importlib.machinery
from .py34compat import module_from_spec
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
C_BUILTIN = 6
PY_FROZEN = 7
def find_spec(module, paths):
finder = (
importlib.machinery.PathFinder().find_spec
if isinstance(paths, list) else
importlib.util.find_spec
)
return finder(module, paths)
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
spec = find_spec(module, paths)
if spec is None:
raise ImportError("Can't find %s" % module)
if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
kind = -1
file = None
static = isinstance(spec.loader, type)
if spec.origin == 'frozen' or static and issubclass(
spec.loader, importlib.machinery.FrozenImporter):
kind = PY_FROZEN
path = None # imp compatibility
suffix = mode = '' # imp compatibility
elif spec.origin == 'built-in' or static and issubclass(
spec.loader, importlib.machinery.BuiltinImporter):
kind = C_BUILTIN
path = None # imp compatibility
suffix = mode = '' # imp compatibility
elif spec.has_location:
path = spec.origin
suffix = os.path.splitext(path)[1]
mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
if suffix in importlib.machinery.SOURCE_SUFFIXES:
kind = PY_SOURCE
elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
kind = PY_COMPILED
elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
kind = C_EXTENSION
if kind in {PY_SOURCE, PY_COMPILED}:
file = open(path, mode)
else:
path = None
suffix = mode = ''
return file, path, (suffix, mode, kind)
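# e.g. find_module('os') opens os.py and returns a description tuple of
# ('.py', 'r', PY_SOURCE), roughly mirroring the old imp.find_module().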
def get_frozen_object(module, paths=None):
spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return spec.loader.get_code(module)
def get_module(module, paths, info):
spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return module_from_spec(spec)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/config.py
|
from __future__ import absolute_import, unicode_literals
import ast
import io
import os
import sys
import warnings
import functools
import importlib
from collections import defaultdict
from functools import partial
from functools import wraps
import contextlib
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.extern.packaging.version import LegacyVersion, parse
from setuptools.extern.packaging.specifiers import SpecifierSet
from setuptools.extern.six import string_types, PY3
__metaclass__ = type
class StaticModule:
"""
Attempt to load the module by the name
"""
def __init__(self, name):
spec = importlib.util.find_spec(name)
with open(spec.origin) as strm:
src = strm.read()
module = ast.parse(src)
vars(self).update(locals())
del self.self
def __getattr__(self, attr):
try:
return next(
ast.literal_eval(statement.value)
for statement in self.module.body
if isinstance(statement, ast.Assign)
for target in statement.targets
if isinstance(target, ast.Name) and target.id == attr
)
except Exception as e:
raise AttributeError(
"{self.name} has no attribute {attr}".format(**locals())
) from e
@contextlib.contextmanager
def patch_path(path):
"""
Add path to front of sys.path for the duration of the context.
"""
try:
sys.path.insert(0, path)
yield
finally:
sys.path.remove(path)
def read_configuration(
filepath, find_others=False, ignore_option_errors=False):
"""Read given configuration file and returns options from it as a dict.
:param str|unicode filepath: Path to configuration file
to get options from.
:param bool find_others: Whether to search for other configuration files
which could be in various places.
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: dict
"""
from setuptools.dist import Distribution, _Distribution
filepath = os.path.abspath(filepath)
if not os.path.isfile(filepath):
raise DistutilsFileError(
'Configuration file %s does not exist.' % filepath)
current_directory = os.getcwd()
os.chdir(os.path.dirname(filepath))
try:
dist = Distribution()
filenames = dist.find_config_files() if find_others else []
if filepath not in filenames:
filenames.append(filepath)
_Distribution.parse_config_files(dist, filenames=filenames)
handlers = parse_configuration(
dist, dist.command_options,
ignore_option_errors=ignore_option_errors)
finally:
os.chdir(current_directory)
return configuration_to_dict(handlers)
def _get_option(target_obj, key):
"""
Given a target object and option key, get that option from
the target object, either through a get_{key} method or
from an attribute directly.
"""
getter_name = 'get_{key}'.format(**locals())
by_attribute = functools.partial(getattr, target_obj, key)
getter = getattr(target_obj, getter_name, by_attribute)
return getter()
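# e.g. for key 'version', this prefers target_obj.get_version() when it
# exists and falls back to the plain target_obj.version attribute.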
def configuration_to_dict(handlers):
"""Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
"""
config_dict = defaultdict(dict)
for handler in handlers:
for option in handler.set_options:
value = _get_option(handler.target_obj, option)
config_dict[handler.section_prefix][option] = value
return config_dict
def parse_configuration(
distribution, command_options, ignore_option_errors=False):
"""Performs additional parsing of configuration options
for a distribution.
Returns a list of used option handlers.
:param Distribution distribution:
:param dict command_options:
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: list
"""
options = ConfigOptionsHandler(
distribution, command_options, ignore_option_errors)
options.parse()
meta = ConfigMetadataHandler(
distribution.metadata, command_options, ignore_option_errors,
distribution.package_dir)
meta.parse()
return meta, options
class ConfigHandler:
"""Handles metadata supplied in configuration files."""
section_prefix = None
"""Prefix for config sections handled by this handler.
Must be provided by subclasses.
"""
aliases = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__)
def __setitem__(self, option_name, value):
unknown = tuple()
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
current_value = getattr(target_obj, option_name, unknown)
if current_value is unknown:
raise KeyError(option_name)
if current_value:
# Already set (non-empty); skip.
return
skip_option = False
parser = self.parsers.get(option_name)
if parser:
try:
value = parser(value)
except Exception:
skip_option = True
if not self.ignore_option_errors:
raise
if skip_option:
return
setter = getattr(target_obj, 'set_%s' % option_name, None)
if setter is None:
setattr(target_obj, option_name, value)
else:
setter(value)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
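# Illustrative behavior (comment added for clarity; values hypothetical):
#     _parse_list('alpha, beta')    -> ['alpha', 'beta']
#     _parse_list('alpha\nbeta\n')  -> ['alpha', 'beta']  (split on lines)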
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
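# Illustrative behavior (values hypothetical): a multi-line value such as
#     first = 1
#     second = 2
# parses to {'first': '1', 'second': '2'}; a line without '=' raises
# DistutilsOptionError.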
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _exclude_files_parser(cls, key):
"""Returns a parser function to make sure field inputs
are not files.
Parses a value after getting the key so error messages are
more informative.
:param key:
:rtype: callable
"""
def parser(value):
exclude_directive = 'file:'
if value.startswith(exclude_directive):
raise ValueError(
'Only strings are accepted for the {0} field, '
'files are not accepted'.format(key))
return value
return parser
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
file: README.rst, CHANGELOG.md, src/file.txt
:param str value:
:rtype: str
"""
include_directive = 'file:'
if not isinstance(value, string_types):
return value
if not value.startswith(include_directive):
return value
spec = value[len(include_directive):]
filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
return '\n'.join(
cls._read_file(path)
for path in filepaths
if (cls._assert_local(path) or True)
and os.path.isfile(path)
)
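# Illustrative directive (paths hypothetical): a value of
#     file: README.rst, docs/extra.txt
# is replaced by the concatenated contents of those files; paths outside
# the current working directory raise DistutilsOptionError, and
# non-existent files are silently skipped.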
@staticmethod
def _assert_local(filepath):
if not filepath.startswith(os.getcwd()):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
@staticmethod
def _read_file(filepath):
with io.open(filepath, encoding='utf-8') as f:
return f.read()
@classmethod
def _parse_attr(cls, value, package_dir=None):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
parent_path = os.getcwd()
if package_dir:
if attrs_path[0] in package_dir:
# A custom path was specified for the module we want to import
custom_path = package_dir[attrs_path[0]]
parts = custom_path.rsplit('/', 1)
if len(parts) > 1:
parent_path = os.path.join(os.getcwd(), parts[0])
module_name = parts[1]
else:
module_name = custom_path
elif '' in package_dir:
# A custom parent directory was specified for all root modules
parent_path = os.path.join(os.getcwd(), package_dir[''])
with patch_path(parent_path):
try:
# attempt to load value statically
return getattr(StaticModule(module_name), attr_name)
except Exception:
# fallback to simple import
module = importlib.import_module(module_name)
return getattr(module, attr_name)
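# Illustrative directive (names hypothetical): a value of
#     attr: mypkg.__version__
# resolves __version__ from mypkg, first statically via StaticModule and,
# failing that, by importing the module.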
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
pass  # Keep silent, as a new option may appear at any time.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
# Dots in section names are translated into dunderscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
def _deprecated_config_handler(self, func, msg, warning_class):
""" this function will wrap around parameters that are deprecated
:param msg: deprecation message
:param warning_class: class of warning exception to be raised
:param func: function to be wrapped around
"""
@wraps(func)
def config_handler(*args, **kwargs):
warnings.warn(msg, warning_class)
return func(*args, **kwargs)
return config_handler
class ConfigMetadataHandler(ConfigHandler):
section_prefix = 'metadata'
aliases = {
'home_page': 'url',
'summary': 'description',
'classifier': 'classifiers',
'platform': 'platforms',
}
strict_mode = False
"""We need to keep it loose, to be partially compatible with
`pbr` and `d2to1` packages, which also use the `metadata` section.
"""
def __init__(self, target_obj, options, ignore_option_errors=False,
package_dir=None):
super(ConfigMetadataHandler, self).__init__(target_obj, options,
ignore_option_errors)
self.package_dir = package_dir
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_file = self._parse_file
parse_dict = self._parse_dict
exclude_files_parser = self._exclude_files_parser
return {
'platforms': parse_list,
'keywords': parse_list,
'provides': parse_list,
'requires': self._deprecated_config_handler(
parse_list,
"The requires parameter is deprecated, please use "
"install_requires for runtime dependencies.",
DeprecationWarning),
'obsoletes': parse_list,
'classifiers': self._get_parser_compound(parse_file, parse_list),
'license': exclude_files_parser('license'),
'license_files': parse_list,
'description': parse_file,
'long_description': parse_file,
'version': self._parse_version,
'project_urls': parse_dict,
}
def _parse_version(self, value):
"""Parses `version` option value.
:param value:
:rtype: str
"""
version = self._parse_file(value)
if version != value:
version = version.strip()
# Be strict about versions loaded from file because it's easy to
# accidentally include newlines and other unintended content
if isinstance(parse(version), LegacyVersion):
tmpl = (
'Version loaded from {value} does not '
'comply with PEP 440: {version}'
)
raise DistutilsOptionError(tmpl.format(**locals()))
return version
version = self._parse_attr(value, self.package_dir)
if callable(version):
version = version()
if not isinstance(version, string_types):
if hasattr(version, '__iter__'):
version = '.'.join(map(str, version))
else:
version = '%s' % version
return version
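# Illustrative version directives handled above (values hypothetical):
#     version = attr: mypkg.__version__   -> resolved module attribute
#     version = file: VERSION             -> file contents, PEP 440-checked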
class ConfigOptionsHandler(ConfigHandler):
section_prefix = 'options'
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_list_semicolon = partial(self._parse_list, separator=';')
parse_bool = self._parse_bool
parse_dict = self._parse_dict
return {
'zip_safe': parse_bool,
'use_2to3': parse_bool,
'include_package_data': parse_bool,
'package_dir': parse_dict,
'use_2to3_fixers': parse_list,
'use_2to3_exclude_fixers': parse_list,
'convert_2to3_doctests': parse_list,
'scripts': parse_list,
'eager_resources': parse_list,
'dependency_links': parse_list,
'namespace_packages': parse_list,
'install_requires': parse_list_semicolon,
'setup_requires': parse_list_semicolon,
'tests_require': parse_list_semicolon,
'packages': self._parse_packages,
'entry_points': self._parse_file,
'py_modules': parse_list,
'python_requires': SpecifierSet,
}
def _parse_packages(self, value):
"""Parses `packages` option value.
:param value:
:rtype: list
"""
find_directives = ['find:', 'find_namespace:']
trimmed_value = value.strip()
if trimmed_value not in find_directives:
return self._parse_list(value)
findns = trimmed_value == find_directives[1]
if findns and not PY3:
raise DistutilsOptionError(
'find_namespace: directive is unsupported on Python < 3.3')
# Read function arguments from a dedicated section.
find_kwargs = self.parse_section_packages__find(
self.sections.get('packages.find', {}))
if findns:
from setuptools import find_namespace_packages as find_packages
else:
from setuptools import find_packages
return find_packages(**find_kwargs)
def parse_section_packages__find(self, section_options):
"""Parses `packages.find` configuration file section.
To be used in conjunction with _parse_packages().
:param dict section_options:
"""
section_data = self._parse_section_to_dict(
section_options, self._parse_list)
valid_keys = ['where', 'include', 'exclude']
find_kwargs = dict(
[(k, v) for k, v in section_data.items() if k in valid_keys and v])
where = find_kwargs.get('where')
if where is not None:
find_kwargs['where'] = where[0] # cast list to single val
return find_kwargs
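# Illustrative setup.cfg counterpart (values hypothetical):
#     [options]
#     packages = find:
#     [options.packages.find]
#     where = src
#     exclude = tests*
# The 'where' list is reduced to its first entry before calling
# find_packages().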
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed
def _parse_package_data(self, section_options):
parsed = self._parse_section_to_dict(section_options, self._parse_list)
root = parsed.get('*')
if root:
parsed[''] = root
del parsed['*']
return parsed
def parse_section_package_data(self, section_options):
"""Parses `package_data` configuration file section.
:param dict section_options:
"""
self['package_data'] = self._parse_package_data(section_options)
def parse_section_exclude_package_data(self, section_options):
"""Parses `exclude_package_data` configuration file section.
:param dict section_options:
"""
self['exclude_package_data'] = self._parse_package_data(
section_options)
def parse_section_extras_require(self, section_options):
"""Parses `extras_require` configuration file section.
:param dict section_options:
"""
parse_list = partial(self._parse_list, separator=';')
self['extras_require'] = self._parse_section_to_dict(
section_options, parse_list)
def parse_section_data_files(self, section_options):
"""Parses `data_files` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['data_files'] = [(k, v) for k, v in parsed.items()]
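# End-to-end sketch (hypothetical project, added for illustration): a
# setup.cfg such as
#     [metadata]
#     name = example
#     version = attr: example.__version__
#     [options]
#     install_requires =
#         requests
# is consumed by ConfigMetadataHandler and ConfigOptionsHandler above via
# parse_configuration().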
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/version.py
|
import pkg_resources
try:
__version__ = pkg_resources.get_distribution('setuptools').version
except Exception:
__version__ = 'unknown'
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/py31compat.py
|
__all__ = []
__metaclass__ = type
try:
# Python >=3.2
from tempfile import TemporaryDirectory
except ImportError:
import shutil
import tempfile
class TemporaryDirectory:
"""
Very simple temporary directory context manager.
Will try to delete afterward, but will also ignore OS and similar
errors on deletion.
"""
def __init__(self, **kwargs):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp(**kwargs)
def __enter__(self):
return self.name
def __exit__(self, exctype, excvalue, exctrace):
try:
shutil.rmtree(self.name, True)
except OSError: # removal errors are not the only possible
pass
self.name = None
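# Usage sketch (illustrative): the fallback mirrors the stdlib context
# manager.
#
#     with TemporaryDirectory() as tmpdir:
#         ...  # tmpdir is removed on exit, ignoring removal errors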
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/depends.py
|
import sys
import marshal
import contextlib
from distutils.version import StrictVersion
from .py33compat import Bytecode
from .py27compat import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from . import py27compat
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
"""A prerequisite to building or installing a distribution"""
def __init__(
self, name, requested_version, module, homepage='',
attribute=None, format=None):
if format is None and requested_version is not None:
format = StrictVersion
if format is not None:
requested_version = format(requested_version)
if attribute is None:
attribute = '__version__'
self.__dict__.update(locals())
del self.self
def full_name(self):
"""Return full package/distribution name, w/version"""
if self.requested_version is not None:
return '%s-%s' % (self.name, self.requested_version)
return self.name
def version_ok(self, version):
"""Is 'version' sufficiently up-to-date?"""
return self.attribute is None or self.format is None or \
str(version) != "unknown" and version >= self.requested_version
def get_version(self, paths=None, default="unknown"):
"""Get version number of installed module, 'None', or 'default'
Search 'paths' for module. If not found, return 'None'. If found,
return the extracted version attribute, or 'default' if no version
attribute was specified, or the value cannot be determined without
importing the module. The version is formatted according to the
requirement's version format (if any), unless it is 'None' or the
supplied 'default'.
"""
if self.attribute is None:
try:
f, p, i = find_module(self.module, paths)
if f:
f.close()
return default
except ImportError:
return None
v = get_module_constant(self.module, self.attribute, default, paths)
if v is not None and v is not default and self.format is not None:
return self.format(v)
return v
def is_present(self, paths=None):
"""Return true if dependency is present on 'paths'"""
return self.get_version(paths) is not None
def is_current(self, paths=None):
"""Return true if dependency is present and up-to-date on 'paths'"""
version = self.get_version(paths)
if version is None:
return False
return self.version_ok(version)
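# Usage sketch (illustrative; the module name is hypothetical): declare a
# build prerequisite and probe the current environment for it.
#
#     req = Require('Example', '1.0', 'example')
#     req.is_present()  # True if module 'example' is importable
#     req.is_current()  # True if its __version__ also satisfies 1.0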
def maybe_close(f):
@contextlib.contextmanager
def empty():
yield
return
if not f:
return empty()
return contextlib.closing(f)
def get_module_constant(module, symbol, default=-1, paths=None):
"""Find 'module' by searching 'paths', and extract 'symbol'
Return 'None' if 'module' does not exist on 'paths', or it does not define
'symbol'. If the module defines 'symbol' as a constant, return the
constant. Otherwise, return 'default'."""
try:
f, path, (suffix, mode, kind) = info = find_module(module, paths)
except ImportError:
# Module doesn't exist
return None
with maybe_close(f):
if kind == PY_COMPILED:
f.read(8) # skip magic & date
code = marshal.load(f)
elif kind == PY_FROZEN:
code = py27compat.get_frozen_object(module, paths)
elif kind == PY_SOURCE:
code = compile(f.read(), path, 'exec')
else:
# Not something we can parse; we'll have to import it. :(
imported = py27compat.get_module(module, paths, info)
return getattr(imported, symbol, None)
return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
"""Extract the constant value of 'symbol' from 'code'
If the name 'symbol' is bound to a constant value by the Python code
object 'code', return that value. If 'symbol' is bound to an expression,
return 'default'. Otherwise, return 'None'.
Return value is based on the first assignment to 'symbol'. 'symbol' must
be a global, or at least a non-"fast" local in the code block. That is,
only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
must be present in 'code.co_names'.
"""
if symbol not in code.co_names:
# name's not there, can't possibly be an assignment
return None
name_idx = list(code.co_names).index(symbol)
STORE_NAME = 90
STORE_GLOBAL = 97
LOAD_CONST = 100
const = default
for byte_code in Bytecode(code):
op = byte_code.opcode
arg = byte_code.arg
if op == LOAD_CONST:
const = code.co_consts[arg]
elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
return const
else:
const = default
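# Usage sketch (illustrative): extract a module-level constant without
# importing anything.
#
#     code = compile("__version__ = '1.2'", '<demo>', 'exec')
#     extract_constant(code, '__version__', -1)  # -> '1.2'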
def _update_globals():
"""
Patch the globals to remove the objects not available on some platforms.
XXX it'd be better to test assertions about bytecode instead.
"""
if not sys.platform.startswith('java') and sys.platform != 'cli':
return
incompatible = 'extract_constant', 'get_module_constant'
for name in incompatible:
del globals()[name]
__all__.remove(name)
_update_globals()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/py27compat.py
|
"""
Compatibility Support for Python 2.7 and earlier
"""
import sys
import platform
from setuptools.extern import six
def get_all_headers(message, key):
"""
Given an HTTPMessage, return all headers matching a given key.
"""
return message.get_all(key)
if six.PY2:
def get_all_headers(message, key): # noqa
return message.getheaders(key)
linux_py2_ascii = (
platform.system() == 'Linux' and
six.PY2
)
rmtree_safe = str if linux_py2_ascii else lambda x: x
"""Workaround for http://bugs.python.org/issue24672"""
try:
from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
from ._imp import get_frozen_object, get_module
except ImportError:
import imp
from imp import PY_COMPILED, PY_FROZEN, PY_SOURCE # noqa
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
parts = module.split('.')
while parts:
part = parts.pop(0)
f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)
if kind == imp.PKG_DIRECTORY:
parts = parts or ['__init__']
paths = [path]
elif parts:
raise ImportError("Can't find %r in %s" % (parts, module))
return info
def get_frozen_object(module, paths):
return imp.get_frozen_object(module)
def get_module(module, paths, info):
imp.load_module(module, *info)
return sys.modules[module]
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/__init__.py
|
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
# Disabled for now due to: #2228, #2230
import setuptools.distutils_patch # noqa: F401
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution
from setuptools.depends import Require
from . import monkey
__metaclass__ = type
__all__ = [
'setup', 'Distribution', 'Command', 'Extension', 'Require',
'SetuptoolsDeprecationWarning',
'find_packages'
]
if PY3:
__all__.append('find_namespace_packages')
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder:
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
if PY3:
find_namespace_packages = PEP420PackageFinder.find
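# Usage sketch (illustrative): discover importable packages beneath the
# current directory, skipping tests.
#
#     find_packages(where='.', exclude=('tests', 'tests.*'))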
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
# our PEP 517 backend patch `distutils.core.Distribution`.
class MinimalDistribution(distutils.core.Distribution):
"""
A minimal version of a distribution for supporting the
fetch_build_eggs interface.
"""
def __init__(self, attrs):
_incl = 'dependency_links', 'setup_requires'
filtered = {
k: attrs[k]
for k in set(_incl) & set(attrs)
}
distutils.core.Distribution.__init__(self, filtered)
def finalize_options(self):
"""
Disable finalize_options to avoid building the working set.
Ref #2158.
"""
dist = MinimalDistribution(attrs)
# Honor setup.cfg's options.
dist.parse_config_files(ignore_option_errors=True)
if dist.setup_requires:
dist.fetch_build_eggs(dist.setup_requires)
def setup(**attrs):
# Make sure we have any requirements needed to interpret 'attrs'.
_install_setup_requires(attrs)
return distutils.core.setup(**attrs)
setup.__doc__ = distutils.core.setup.__doc__
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, string_types):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
def ensure_string_list(self, option):
r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif isinstance(val, string_types):
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if isinstance(val, list):
ok = all(isinstance(v, string_types) for v in val)
else:
ok = False
if not ok:
raise DistutilsOptionError(
"'%s' must be a list of strings (got %r)"
% (option, val))
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
class sic(str):
"""Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
# Apply monkey patches
monkey.patch_all()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/installer.py
|
import glob
import os
import subprocess
import sys
from distutils import log
from distutils.errors import DistutilsError
import pkg_resources
from setuptools.command.easy_install import easy_install
from setuptools.extern import six
from setuptools.wheel import Wheel
from .py31compat import TemporaryDirectory
def _fixup_find_links(find_links):
"""Ensure find-links option end-up being a list of strings."""
if isinstance(find_links, six.string_types):
return find_links.split()
assert isinstance(find_links, (tuple, list))
return find_links
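# Illustrative: a whitespace-separated string such as 'http://a http://b'
# becomes ['http://a', 'http://b']; tuples and lists pass through unchanged.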
def _legacy_fetch_build_egg(dist, req):
"""Fetch an egg needed for building.
Legacy path using EasyInstall.
"""
tmp_dist = dist.__class__({'script_args': ['easy_install']})
opts = tmp_dist.get_option_dict('easy_install')
opts.clear()
opts.update(
(k, v)
for k, v in dist.get_option_dict('easy_install').items()
if k in (
# don't use any other settings
'find_links', 'site_dirs', 'index_url',
'optimize', 'site_dirs', 'allow_hosts',
))
if dist.dependency_links:
links = dist.dependency_links[:]
if 'find_links' in opts:
links = _fixup_find_links(opts['find_links'][1]) + links
opts['find_links'] = ('setup', links)
install_dir = dist.get_egg_cache_dir()
cmd = easy_install(
tmp_dist, args=["x"], install_dir=install_dir,
exclude_scripts=True,
always_copy=False, build_directory=None, editable=False,
upgrade=False, multi_version=True, no_report=True, user=False
)
cmd.ensure_finalized()
return cmd.easy_install(req)
def fetch_build_egg(dist, req):
"""Fetch an egg needed for building.
Use pip/wheel to fetch/build a wheel."""
# Check pip is available.
try:
pkg_resources.get_distribution('pip')
except pkg_resources.DistributionNotFound:
dist.announce(
'WARNING: The pip package is not available, falling back '
'to EasyInstall for handling setup_requires/test_requires; '
'this is deprecated and will be removed in a future version.',
log.WARN
)
return _legacy_fetch_build_egg(dist, req)
# Warn if wheel is not available.
try:
pkg_resources.get_distribution('wheel')
except pkg_resources.DistributionNotFound:
dist.announce('WARNING: The wheel package is not available.', log.WARN)
# Ignore environment markers; if supplied, it is required.
req = strip_marker(req)
# Take easy_install options into account, but do not override relevant
# pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
# take precedence.
opts = dist.get_option_dict('easy_install')
if 'allow_hosts' in opts:
raise DistutilsError('the `allow-hosts` option is not supported '
'when using pip to install requirements.')
if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:
quiet = False
else:
quiet = True
if 'PIP_INDEX_URL' in os.environ:
index_url = None
elif 'index_url' in opts:
index_url = opts['index_url'][1]
else:
index_url = None
if 'find_links' in opts:
find_links = _fixup_find_links(opts['find_links'][1])[:]
else:
find_links = []
if dist.dependency_links:
find_links.extend(dist.dependency_links)
eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
environment = pkg_resources.Environment()
for egg_dist in pkg_resources.find_distributions(eggs_dir):
if egg_dist in req and environment.can_add(egg_dist):
return egg_dist
with TemporaryDirectory() as tmpdir:
cmd = [
sys.executable, '-m', 'pip',
'--disable-pip-version-check',
'wheel', '--no-deps',
'-w', tmpdir,
]
if quiet:
cmd.append('--quiet')
if index_url is not None:
cmd.extend(('--index-url', index_url))
if find_links is not None:
for link in find_links:
cmd.extend(('--find-links', link))
# If requirement is a PEP 508 direct URL, directly pass
# the URL to pip, as `req @ url` does not work on the
# command line.
if req.url:
cmd.append(req.url)
else:
cmd.append(str(req))
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as e:
raise DistutilsError(str(e)) from e
wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
dist_location = os.path.join(eggs_dir, wheel.egg_name())
wheel.install_as_egg(dist_location)
dist_metadata = pkg_resources.PathMetadata(
dist_location, os.path.join(dist_location, 'EGG-INFO'))
dist = pkg_resources.Distribution.from_filename(
dist_location, metadata=dist_metadata)
return dist
def strip_marker(req):
"""
Return a new requirement without the environment marker to avoid
calling pip with something like `babel; extra == "i18n"`, which
would always be ignored.
"""
# create a copy to avoid mutating the input
req = pkg_resources.Requirement.parse(str(req))
req.marker = None
return req
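# Usage sketch (illustrative): the marker is dropped so pip installs the
# requirement unconditionally.
#
#     req = pkg_resources.Requirement.parse('babel; extra == "i18n"')
#     str(strip_marker(req))  # -> 'babel'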
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/glob.py
|
"""
Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
Changes include:
* `yield from` and PEP3102 `*` removed.
* Hidden files are not ignored.
"""
import os
import re
import fnmatch
__all__ = ["glob", "iglob", "escape"]
def glob(pathname, recursive=False):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
return list(iglob(pathname, recursive=recursive))
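# Usage sketch (illustrative paths): with recursive=True the '**' component
# matches across directory levels.
#
#     glob('src/**/*.py', recursive=True)  # all .py files under src/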
def iglob(pathname, recursive=False):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
it = _iglob(pathname, recursive)
if recursive and _isrecursive(pathname):
s = next(it) # skip empty string
assert not s
return it
def _iglob(pathname, recursive):
dirname, basename = os.path.split(pathname)
if not has_magic(pathname):
if basename:
if os.path.lexists(pathname):
yield pathname
else:
# Patterns ending with a slash should match only directories
if os.path.isdir(dirname):
yield pathname
return
if not dirname:
if recursive and _isrecursive(basename):
for x in glob2(dirname, basename):
yield x
else:
for x in glob1(dirname, basename):
yield x
return
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname and has_magic(dirname):
dirs = _iglob(dirname, recursive)
else:
dirs = [dirname]
if has_magic(basename):
if recursive and _isrecursive(basename):
glob_in_dir = glob2
else:
glob_in_dir = glob1
else:
glob_in_dir = glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename):
yield os.path.join(dirname, name)
# These 2 helper functions non-recursively glob inside a literal directory.
# They return a list of basenames. `glob1` accepts a pattern while `glob0`
# takes a literal basename (so it only has to check for its existence).
def glob1(dirname, pattern):
if not dirname:
if isinstance(pattern, bytes):
dirname = os.curdir.encode('ASCII')
else:
dirname = os.curdir
try:
names = os.listdir(dirname)
except OSError:
return []
return fnmatch.filter(names, pattern)
def glob0(dirname, basename):
if not basename:
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
# This helper function recursively yields relative pathnames inside a literal
# directory.
def glob2(dirname, pattern):
assert _isrecursive(pattern)
yield pattern[:0]
for x in _rlistdir(dirname):
yield x
# Recursively yields relative pathnames inside a literal directory.
def _rlistdir(dirname):
if not dirname:
if isinstance(dirname, bytes):
dirname = os.curdir.encode('ASCII')
else:
dirname = os.curdir
try:
names = os.listdir(dirname)
except os.error:
return
for x in names:
yield x
path = os.path.join(dirname, x) if dirname else x
for y in _rlistdir(path):
yield os.path.join(x, y)
magic_check = re.compile('([*?[])')
magic_check_bytes = re.compile(b'([*?[])')
def has_magic(s):
if isinstance(s, bytes):
match = magic_check_bytes.search(s)
else:
match = magic_check.search(s)
return match is not None
def _isrecursive(pattern):
if isinstance(pattern, bytes):
return pattern == b'**'
else:
return pattern == '**'
def escape(pathname):
"""Escape all special characters.
"""
# Escaping is done by wrapping any of "*?[" between square brackets.
# Metacharacters do not work in the drive part and shouldn't be escaped.
drive, pathname = os.path.splitdrive(pathname)
if isinstance(pathname, bytes):
pathname = magic_check_bytes.sub(br'[\1]', pathname)
else:
pathname = magic_check.sub(r'[\1]', pathname)
return drive + pathname
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/sandbox.py
|
import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import textwrap
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
import pkg_resources
from distutils.errors import DistutilsError
from pkg_resources import working_set
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
os.makedirs(replacement, exist_ok=True)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@staticmethod
def dump(type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
# get UnpickleableException inside the sandbox
from setuptools.sandbox import UnpickleableException as cls
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
six.reraise(type, exc, self._tb)
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
hide_setuptools()
with save_path():
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script] + list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
# __file__ should be a byte string on Python 2 (#712)
dunder_file = (
setup_script
if isinstance(setup_script, str) else
setup_script.encode(sys.getfilesystemencoding())
)
with DirectorySandbox(setup_dir):
ns = dict(__file__=dunder_file, __name__='__main__')
_execfile(setup_script, ns)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
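# Usage sketch (illustrative path and arguments): execute a project's setup
# script while confining filesystem writes to its directory.
#
#     run_setup('/path/to/project/setup.py', ['egg_info'])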
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source, name))
def __enter__(self):
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
def __exit__(self, exc_type, exc_value, traceback):
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def run(self, func):
"""Run 'func' under os sandboxing"""
with self:
return func()
def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
def _validate_path(self, path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self, operation, path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw)
)
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull]
else:
_EXCEPTIONS = []
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
r'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
from setuptools.sandbox import SandboxViolation
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src, dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [
getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
tmpl = textwrap.dedent("""
SandboxViolation: {cmd}{args!r} {kwargs}
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.
""").lstrip()
def __str__(self):
cmd, args, kwargs = self.args
return self.tmpl.format(**locals())
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/py34compat.py
|
import importlib
try:
import importlib.util
except ImportError:
pass
try:
module_from_spec = importlib.util.module_from_spec
except AttributeError:
def module_from_spec(spec):
return spec.loader.load_module(spec.name)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/script.tmpl
|
# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
__requires__ = %(spec)r
__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/launch.py
|
"""
Launch the Python script on the command line after
setuptools is bootstrapped via import.
"""
# Note that setuptools gets imported implicitly by the
# invocation of this script using python -m setuptools.launch
import tokenize
import sys
def run():
"""
Run the script in sys.argv[1] as if it had
been invoked naturally.
"""
__builtins__
script_name = sys.argv[1]
namespace = dict(
__file__=script_name,
__name__='__main__',
__doc__=None,
)
sys.argv[:] = sys.argv[1:]
open_ = getattr(tokenize, 'open', open)
with open_(script_name) as fid:
script = fid.read()
norm_script = script.replace('\\r\\n', '\\n')
code = compile(norm_script, script_name, 'exec')
exec(code, namespace)
if __name__ == '__main__':
run()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/extension.py
|
import re
import functools
import distutils.core
import distutils.errors
import distutils.extension
from setuptools.extern.six.moves import map
from .monkey import get_unpatched
def _have_cython():
"""
Return True if Cython can be imported.
"""
cython_impl = 'Cython.Distutils.build_ext'
try:
# from (cython_impl) import build_ext
__import__(cython_impl, fromlist=['build_ext']).build_ext
return True
except Exception:
pass
return False
# for compatibility
have_pyrex = _have_cython
_Extension = get_unpatched(distutils.core.Extension)
class Extension(_Extension):
"""Extension that uses '.c' files in place of '.pyx' files"""
def __init__(self, name, sources, *args, **kw):
# The *args is needed for compatibility as calls may use positional
# arguments. py_limited_api may be set only via keyword.
self.py_limited_api = kw.pop("py_limited_api", False)
_Extension.__init__(self, name, sources, *args, **kw)
def _convert_pyx_sources_to_lang(self):
"""
Replace sources that have a .pyx extension with sources using the target
language extension. This mechanism allows language authors to supply
pre-converted sources but to prefer the .pyx sources.
"""
if _have_cython():
# the build has Cython, so allow it to compile the .pyx files
return
lang = self.language or ''
target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
sub = functools.partial(re.sub, '.pyx$', target_ext)
self.sources = list(map(sub, self.sources))
class Library(Extension):
"""Just like a regular Extension, but built as a library instead"""
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/distutils_patch.py
|
"""
Ensure that the local copy of distutils is preferred over stdlib.
See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
for more motivation.
"""
import sys
import re
import os
import importlib
import warnings
is_pypy = '__pypy__' in sys.builtin_module_names
def warn_distutils_present():
if 'distutils' not in sys.modules:
return
if is_pypy and sys.version_info < (3, 7):
# PyPy for 3.6 unconditionally imports distutils, so bypass the warning
# https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
return
warnings.warn(
"Distutils was imported before Setuptools. This usage is discouraged "
"and may exhibit undesirable behaviors or errors. Please use "
"Setuptools' objects directly or at least import Setuptools first.")
def clear_distutils():
if 'distutils' not in sys.modules:
return
warnings.warn("Setuptools is replacing distutils.")
mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
for name in mods:
del sys.modules[name]
def enabled():
"""
Allow selection of distutils by environment variable.
"""
which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
return which == 'local'
def ensure_local_distutils():
clear_distutils()
distutils = importlib.import_module('setuptools._distutils')
distutils.__name__ = 'distutils'
sys.modules['distutils'] = distutils
# sanity check that submodules load as expected
core = importlib.import_module('distutils.core')
assert '_distutils' in core.__file__, core.__file__
warn_distutils_present()
if enabled():
ensure_local_distutils()
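# Usage sketch (illustrative): opting in to the bundled distutils requires
# setting the environment variable before the first import, e.g.
#     SETUPTOOLS_USE_DISTUTILS=local python -c "import setuptools"
# With the default value ('stdlib'), only the warning check above runs.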
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/unicode_utils.py
|
import unicodedata
import sys
from setuptools.extern import six
# HFS Plus uses decomposed UTF-8
def decompose(path):
if isinstance(path, six.text_type):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
def filesys_decode(path):
"""
Ensure that the given path is decoded;
return None when no expected encoding works.
"""
if isinstance(path, six.text_type):
return path
fs_enc = sys.getfilesystemencoding() or 'utf-8'
candidates = fs_enc, 'utf-8'
for enc in candidates:
try:
return path.decode(enc)
except UnicodeDecodeError:
continue
def try_encode(string, enc):
"turn unicode encoding into a functional routine"
try:
return string.encode(enc)
except UnicodeEncodeError:
return None
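# Illustrative behavior (bytes value hypothetical):
#     filesys_decode(b'caf\xc3\xa9')  # -> 'café' where the filesystem
#                                     #    encoding or fallback is UTF-8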
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/monkey.py
|
"""
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
from importlib import import_module
import inspect
from setuptools.extern import six
import setuptools
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def _get_mro(cls):
"""
Returns the base classes for cls sorted by the MRO.
Works around an issue on Jython where inspect.getmro will not return all
base classes if multiple classes share the same name. Instead, this
function will return a tuple containing the class itself, and the contents
of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
"""
if platform.python_implementation() == "Jython":
return (cls,) + cls.__bases__
return inspect.getmro(cls)
def get_unpatched(item):
lookup = (
get_unpatched_class if isinstance(item, six.class_types) else
get_unpatched_function if isinstance(item, types.FunctionType) else
lambda item: None
)
return lookup(item)
def get_unpatched_class(cls):
"""Protect against re-patching the distutils if reloaded
Also ensures that no other distutils extension monkeypatched the distutils
first.
"""
external_bases = (
cls
for cls in _get_mro(cls)
if not cls.__module__.startswith('setuptools')
)
base = next(external_bases)
if not base.__module__.startswith('distutils'):
msg = "distutils has already been patched by %r" % cls
raise AssertionError(msg)
return base
def patch_all():
# we can't patch distutils.cmd, alas
distutils.core.Command = setuptools.Command
has_issue_12885 = sys.version_info <= (3, 5, 3)
if has_issue_12885:
# fix findall bug in distutils (http://bugs.python.org/issue12885)
distutils.filelist.findall = setuptools.findall
needs_warehouse = (
sys.version_info < (2, 7, 13)
or
(3, 4) < sys.version_info < (3, 4, 6)
or
(3, 5) < sys.version_info <= (3, 5, 3)
)
if needs_warehouse:
warehouse = 'https://upload.pypi.org/legacy/'
distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
_patch_distribution_metadata()
# Install Distribution throughout the distutils
for module in distutils.dist, distutils.core, distutils.cmd:
module.Distribution = setuptools.dist.Distribution
# Install the patched Extension
distutils.core.Extension = setuptools.extension.Extension
distutils.extension.Extension = setuptools.extension.Extension
if 'distutils.command.build_ext' in sys.modules:
sys.modules['distutils.command.build_ext'].Extension = (
setuptools.extension.Extension
)
patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata():
"""Patch write_pkg_file and read_pkg_file for higher metadata standards"""
for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
new_val = getattr(setuptools.dist, attr)
setattr(distutils.dist.DistributionMetadata, attr, new_val)
def patch_func(replacement, target_mod, func_name):
"""
Patch func_name in target_mod with replacement.
Important - original must be resolved by name to avoid
patching an already patched function.
"""
original = getattr(target_mod, func_name)
# set the 'unpatched' attribute on the replacement to
# point to the original.
vars(replacement).setdefault('unpatched', original)
# replace the function in the original module
setattr(target_mod, func_name, replacement)
def get_unpatched_function(candidate):
return getattr(candidate, 'unpatched')
def patch_for_msvc_specialized_compiler():
"""
Patch functions in distutils to use standalone Microsoft Visual C++
compilers.
"""
# import late to avoid circular imports on Python < 3.5
msvc = import_module('setuptools.msvc')
if platform.system() != 'Windows':
# Compilers are only available on Microsoft Windows
return
def patch_params(mod_name, func_name):
"""
Prepare the parameters for patch_func to patch indicated function.
"""
repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
repl_name = repl_prefix + func_name.lstrip('_')
repl = getattr(msvc, repl_name)
mod = import_module(mod_name)
if not hasattr(mod, func_name):
raise ImportError(func_name)
return repl, mod, func_name
# Python 2.7 to 3.4
msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
# Python 3.5+
msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
try:
# Patch distutils.msvc9compiler
patch_func(*msvc9('find_vcvarsall'))
patch_func(*msvc9('query_vcvarsall'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler._get_vc_env
patch_func(*msvc14('_get_vc_env'))
except ImportError:
pass
try:
# Patch distutils._msvccompiler.gen_lib_options for Numpy
patch_func(*msvc14('gen_lib_options'))
except ImportError:
pass
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/build_meta.py
|
"""A PEP 517 interface to setuptools
Previously, when a user or a command line tool (let's call it a "frontend")
needed to make a request of setuptools to take a certain action, for
example, generating a list of installation requirements, the frontend would
would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
PEP 517 defines a different method of interfacing with setuptools. Rather
than calling "setup.py" directly, the frontend should:
1. Set the current directory to the directory with a setup.py file
2. Import this module into a "safe" python interpreter (one in which it is
acceptable for setuptools to set global variables or crash hard).
3. Call one of the functions defined in PEP 517.
What each function does is defined in PEP 517. However, here is a "casual"
definition of the functions (this definition should not be relied on for
bug reports or API stability):
- `build_wheel`: build a wheel in the folder and return the basename
- `get_requires_for_build_wheel`: get the `setup_requires` to build
- `prepare_metadata_for_build_wheel`: get the `install_requires`
- `build_sdist`: build an sdist in the folder and return the basename
- `get_requires_for_build_sdist`: get the `setup_requires` to build
Again, this is not a formal definition! Just a "taste" of the module.
"""
import io
import os
import sys
import tokenize
import shutil
import contextlib
import setuptools
import distutils
from setuptools.py31compat import TemporaryDirectory
from pkg_resources import parse_requirements
__all__ = ['get_requires_for_build_sdist',
'get_requires_for_build_wheel',
'prepare_metadata_for_build_wheel',
'build_wheel',
'build_sdist',
'__legacy__',
'SetupRequirementsError']
class SetupRequirementsError(BaseException):
def __init__(self, specifiers):
self.specifiers = specifiers
class Distribution(setuptools.dist.Distribution):
def fetch_build_eggs(self, specifiers):
specifier_list = list(map(str, parse_requirements(specifiers)))
raise SetupRequirementsError(specifier_list)
@classmethod
@contextlib.contextmanager
def patch(cls):
"""
Replace
distutils.dist.Distribution with this class
for the duration of this context.
"""
orig = distutils.core.Distribution
distutils.core.Distribution = cls
try:
yield
finally:
distutils.core.Distribution = orig
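# Hedged sketch of the interception trick above: fetch_build_eggs raises
# instead of downloading, so the backend can capture the requirement list.
def _demo_capture_setup_requires():  # illustrative only
    try:
        Distribution().fetch_build_eggs(['wheel>=0.33'])
    except SetupRequirementsError as e:
        return e.specifiers  # ['wheel>=0.33']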
def _to_str(s):
"""
Convert a filename to a string (on Python 2, explicitly
a byte string, not Unicode) as distutils checks for the
exact type str.
"""
if sys.version_info[0] == 2 and not isinstance(s, str):
# Assume it's Unicode, as that's what the PEP says
# should be provided.
return s.encode(sys.getfilesystemencoding())
return s
def _get_immediate_subdirectories(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def _file_with_extension(directory, extension):
matching = (
f for f in os.listdir(directory)
if f.endswith(extension)
)
file, = matching
return file
def _open_setup_script(setup_script):
if not os.path.exists(setup_script):
# Supply a default setup.py
return io.StringIO(u"from setuptools import setup; setup()")
return getattr(tokenize, 'open', open)(setup_script)
class _BuildMetaBackend(object):
def _fix_config(self, config_settings):
config_settings = config_settings or {}
config_settings.setdefault('--global-option', [])
return config_settings
def _get_build_requires(self, config_settings, requirements):
config_settings = self._fix_config(config_settings)
sys.argv = sys.argv[:1] + ['egg_info'] + \
config_settings["--global-option"]
try:
with Distribution.patch():
self.run_setup()
except SetupRequirementsError as e:
requirements += e.specifiers
return requirements
def run_setup(self, setup_script='setup.py'):
# Note that we can reuse our build directory between calls
# Correctness comes first, then optimization later
__file__ = setup_script
__name__ = '__main__'
with _open_setup_script(__file__) as f:
code = f.read().replace(r'\r\n', r'\n')
exec(compile(code, __file__, 'exec'), locals())
def get_requires_for_build_wheel(self, config_settings=None):
config_settings = self._fix_config(config_settings)
return self._get_build_requires(
config_settings, requirements=['wheel'])
def get_requires_for_build_sdist(self, config_settings=None):
config_settings = self._fix_config(config_settings)
return self._get_build_requires(config_settings, requirements=[])
def prepare_metadata_for_build_wheel(self, metadata_directory,
config_settings=None):
sys.argv = sys.argv[:1] + ['dist_info', '--egg-base',
_to_str(metadata_directory)]
self.run_setup()
dist_info_directory = metadata_directory
while True:
dist_infos = [f for f in os.listdir(dist_info_directory)
if f.endswith('.dist-info')]
if (
len(dist_infos) == 0 and
len(_get_immediate_subdirectories(dist_info_directory)) == 1
):
dist_info_directory = os.path.join(
dist_info_directory, os.listdir(dist_info_directory)[0])
continue
assert len(dist_infos) == 1
break
# PEP 517 requires that the .dist-info directory be placed in the
# metadata_directory. To comply, we MUST copy the directory to the root
if dist_info_directory != metadata_directory:
shutil.move(
os.path.join(dist_info_directory, dist_infos[0]),
metadata_directory)
shutil.rmtree(dist_info_directory, ignore_errors=True)
return dist_infos[0]
def _build_with_temp_dir(self, setup_command, result_extension,
result_directory, config_settings):
config_settings = self._fix_config(config_settings)
result_directory = os.path.abspath(result_directory)
# Build in a temporary directory, then copy to the target.
os.makedirs(result_directory, exist_ok=True)
with TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
sys.argv = (sys.argv[:1] + setup_command +
['--dist-dir', tmp_dist_dir] +
config_settings["--global-option"])
self.run_setup()
result_basename = _file_with_extension(
tmp_dist_dir, result_extension)
result_path = os.path.join(result_directory, result_basename)
if os.path.exists(result_path):
# os.rename will fail overwriting on non-Unix.
os.remove(result_path)
os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
return result_basename
def build_wheel(self, wheel_directory, config_settings=None,
metadata_directory=None):
return self._build_with_temp_dir(['bdist_wheel'], '.whl',
wheel_directory, config_settings)
def build_sdist(self, sdist_directory, config_settings=None):
return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
'.tar.gz', sdist_directory,
config_settings)
class _BuildMetaLegacyBackend(_BuildMetaBackend):
"""Compatibility backend for setuptools
This is a version of setuptools.build_meta that endeavors
to maintain backwards
compatibility with pre-PEP 517 modes of invocation. It
exists as a temporary
bridge between the old packaging mechanism and the new
packaging mechanism,
and will eventually be removed.
"""
def run_setup(self, setup_script='setup.py'):
# In order to maintain compatibility with scripts assuming that
# the setup.py script is in a directory on the PYTHONPATH, inject
# '' into sys.path. (pypa/setuptools#1642)
sys_path = list(sys.path) # Save the original path
script_dir = os.path.dirname(os.path.abspath(setup_script))
if script_dir not in sys.path:
sys.path.insert(0, script_dir)
# Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to
# get the directory of the source code. They expect it to refer to the
# setup.py script.
sys_argv_0 = sys.argv[0]
sys.argv[0] = setup_script
try:
super(_BuildMetaLegacyBackend,
self).run_setup(setup_script=setup_script)
finally:
# While PEP 517 frontends should be calling each hook in a fresh
# subprocess according to the standard (and thus it should not be
# strictly necessary to restore the old sys.path), we'll restore
# the original path so that the path manipulation does not persist
# within the hook after run_setup is called.
sys.path[:] = sys_path
sys.argv[0] = sys_argv_0
# The primary backend
_BACKEND = _BuildMetaBackend()
get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
build_wheel = _BACKEND.build_wheel
build_sdist = _BACKEND.build_sdist
# The legacy backend
__legacy__ = _BuildMetaLegacyBackend()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/errors.py
|
"""setuptools.errors
Provides exceptions used by setuptools modules.
"""
from distutils.errors import DistutilsError
class RemovedCommandError(DistutilsError, RuntimeError):
"""Error used for commands that have been removed in setuptools.
Since ``setuptools`` is built on ``distutils``, simply removing a command
from ``setuptools`` will make the behavior fall back to ``distutils``; this
error is raised if a command exists in ``distutils`` but has been actively
removed in ``setuptools``.
"""
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/dep_util.py
|
from distutils.dep_util import newer_group
# yes, this was almost entirely copy-pasted from
# 'newer_pairwise()'; this is just another convenience
# function.
def newer_pairwise_group(sources_groups, targets):
"""Walk both arguments in parallel, testing if each source group is newer
than its corresponding target. Returns a pair of lists (sources_groups,
targets) where sources is newer than target, according to the semantics
of 'newer_group()'.
"""
if len(sources_groups) != len(targets):
raise ValueError(
"'sources_group' and 'targets' must be the same length")
# build a pair of lists (sources_groups, targets) where source is newer
n_sources = []
n_targets = []
for i in range(len(sources_groups)):
if newer_group(sources_groups[i], targets[i]):
n_sources.append(sources_groups[i])
n_targets.append(targets[i])
return n_sources, n_targets
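# Hedged usage sketch; the file names are hypothetical and would need to
# exist on disk (newer_group() raises for missing sources by default).
def _demo_newer_pairwise_group():  # illustrative only
    sources = [['a.c', 'a.h'], ['b.c']]
    targets = ['a.o', 'b.o']
    # keeps only the (source group, target) pairs that need rebuilding
    return newer_pairwise_group(sources, targets)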
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/msvc.py
|
"""
Improved support for Microsoft Visual C++ compilers.
Known supported compilers:
--------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Windows SDK 7.0 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Microsoft Visual C++ 14.X:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64)
This may also support compilers shipped with compatible Visual Studio versions.
"""
import json
from io import open
from os import listdir, pathsep
from os.path import join, isfile, isdir, dirname
import sys
import platform
import itertools
import subprocess
import distutils.errors
from setuptools.extern.packaging.version import LegacyVersion
from setuptools.extern.six.moves import filterfalse
from .monkey import get_unpatched
if platform.system() == 'Windows':
from setuptools.extern.six.moves import winreg
from os import environ
else:
# Mock winreg and environ so the module can be imported on this platform.
class winreg:
HKEY_USERS = None
HKEY_CURRENT_USER = None
HKEY_LOCAL_MACHINE = None
HKEY_CLASSES_ROOT = None
environ = dict()
_msvc9_suppress_errors = (
# msvc9compiler isn't available on some platforms
ImportError,
# msvc9compiler raises DistutilsPlatformError in some
# environments. See #1118.
distutils.errors.DistutilsPlatformError,
)
try:
from distutils.msvc9compiler import Reg
except _msvc9_suppress_errors:
pass
def msvc9_find_vcvarsall(version):
"""
Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
compiler build for Python
(VCForPython / Microsoft Visual C++ Compiler for Python 2.7).
Fall back to original behavior when the standalone compiler is not
available.
Redirect the path of "vcvarsall.bat".
Parameters
----------
version: float
Required Microsoft Visual C++ version.
Return
------
str
vcvarsall.bat path
"""
vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
key = vc_base % ('', version)
try:
# Per-user installs register the compiler path here
productdir = Reg.get_value(key, "installdir")
except KeyError:
try:
# All-user installs on a 64-bit system register here
key = vc_base % ('Wow6432Node\\', version)
productdir = Reg.get_value(key, "installdir")
except KeyError:
productdir = None
if productdir:
vcvarsall = join(productdir, "vcvarsall.bat")
if isfile(vcvarsall):
return vcvarsall
return get_unpatched(msvc9_find_vcvarsall)(version)
def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
"""
Patched "distutils.msvc9compiler.query_vcvarsall" for support extra
Microsoft Visual C++ 9.0 and 10.0 compilers.
Set environment without use of "vcvarsall.bat".
Parameters
----------
ver: float
Required Microsoft Visual C++ version.
arch: str
Target architecture.
Return
------
dict
environment
"""
# Try to get environment from vcvarsall.bat (Classical way)
try:
orig = get_unpatched(msvc9_query_vcvarsall)
return orig(ver, arch, *args, **kwargs)
except distutils.errors.DistutilsPlatformError:
        # Ignore the error if vcvarsall.bat is missing
pass
except ValueError:
        # Ignore the error if the environment was not set by vcvarsall.bat
pass
    # On error, try to set the environment directly
try:
return EnvironmentInfo(arch, ver).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, ver, arch)
raise
def _msvc14_find_vc2015():
"""Python 3.8 "distutils/_msvccompiler.py" backport"""
try:
key = winreg.OpenKey(
winreg.HKEY_LOCAL_MACHINE,
r"Software\Microsoft\VisualStudio\SxS\VC7",
0,
winreg.KEY_READ | winreg.KEY_WOW64_32KEY
)
except OSError:
return None, None
best_version = 0
best_dir = None
with key:
for i in itertools.count():
try:
v, vc_dir, vt = winreg.EnumValue(key, i)
except OSError:
break
if v and vt == winreg.REG_SZ and isdir(vc_dir):
try:
version = int(float(v))
except (ValueError, TypeError):
continue
if version >= 14 and version > best_version:
best_version, best_dir = version, vc_dir
return best_version, best_dir
def _msvc14_find_vc2017():
"""Python 3.8 "distutils/_msvccompiler.py" backport
Returns "15, path" based on the result of invoking vswhere.exe
If no install is found, returns "None, None"
The version is returned to avoid unnecessarily changing the function
result. It may be ignored when the path is not None.
If vswhere.exe is not available, by definition, VS 2017 is not
installed.
"""
root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles")
if not root:
return None, None
try:
path = subprocess.check_output([
join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
"-latest",
"-prerelease",
"-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
"-property", "installationPath",
"-products", "*",
]).decode(encoding="mbcs", errors="strict").strip()
except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
return None, None
path = join(path, "VC", "Auxiliary", "Build")
if isdir(path):
return 15, path
return None, None
PLAT_SPEC_TO_RUNTIME = {
'x86': 'x86',
'x86_amd64': 'x64',
'x86_arm': 'arm',
'x86_arm64': 'arm64'
}
def _msvc14_find_vcvarsall(plat_spec):
"""Python 3.8 "distutils/_msvccompiler.py" backport"""
_, best_dir = _msvc14_find_vc2017()
vcruntime = None
if plat_spec in PLAT_SPEC_TO_RUNTIME:
vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
else:
vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
if best_dir:
vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**",
vcruntime_plat, "Microsoft.VC14*.CRT",
"vcruntime140.dll")
try:
import glob
vcruntime = glob.glob(vcredist, recursive=True)[-1]
except (ImportError, OSError, LookupError):
vcruntime = None
if not best_dir:
best_version, best_dir = _msvc14_find_vc2015()
if best_version:
vcruntime = join(best_dir, 'redist', vcruntime_plat,
"Microsoft.VC140.CRT", "vcruntime140.dll")
if not best_dir:
return None, None
vcvarsall = join(best_dir, "vcvarsall.bat")
if not isfile(vcvarsall):
return None, None
if not vcruntime or not isfile(vcruntime):
vcruntime = None
return vcvarsall, vcruntime
def _msvc14_get_vc_env(plat_spec):
"""Python 3.8 "distutils/_msvccompiler.py" backport"""
if "DISTUTILS_USE_SDK" in environ:
return {
key.lower(): value
for key, value in environ.items()
}
vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec)
if not vcvarsall:
raise distutils.errors.DistutilsPlatformError(
"Unable to find vcvarsall.bat"
)
try:
out = subprocess.check_output(
'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
stderr=subprocess.STDOUT,
).decode('utf-16le', errors='replace')
except subprocess.CalledProcessError as exc:
raise distutils.errors.DistutilsPlatformError(
"Error executing {}".format(exc.cmd)
) from exc
env = {
key.lower(): value
for key, _, value in
(line.partition('=') for line in out.splitlines())
if key and value
}
if vcruntime:
env['py_vcruntime_redist'] = vcruntime
return env
def msvc14_get_vc_env(plat_spec):
"""
Patched "distutils._msvccompiler._get_vc_env" for support extra
Microsoft Visual C++ 14.X compilers.
Set environment without use of "vcvarsall.bat".
Parameters
----------
plat_spec: str
Target architecture.
Return
------
dict
environment
"""
# Always use backport from CPython 3.8
try:
return _msvc14_get_vc_env(plat_spec)
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, 14.0)
raise
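# Hedged sketch: querying the VC++ 14.X environment for a 64-bit target.
# Only meaningful on Windows with suitable build tools installed.
def _demo_msvc14_env():  # illustrative only
    env = msvc14_get_vc_env('x86_amd64')   # plat_spec, as used by distutils
    return env.get('include', '').split(pathsep)  # keys are lowercased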
def msvc14_gen_lib_options(*args, **kwargs):
"""
Patched "distutils._msvccompiler.gen_lib_options" for fix
compatibility between "numpy.distutils" and "distutils._msvccompiler"
(for Numpy < 1.11.2)
"""
if "numpy.distutils" in sys.modules:
import numpy as np
if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
def _augment_exception(exc, version, arch=''):
"""
Add details to the exception message to help guide the user
as to what action will resolve it.
"""
# Error if MSVC++ directory not found or environment not set
message = exc.args[0]
if "vcvarsall" in message.lower() or "visual c" in message.lower():
# Special error message if MSVC++ not installed
tmpl = 'Microsoft Visual C++ {version:0.1f} is required.'
message = tmpl.format(**locals())
msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
if version == 9.0:
if arch.lower().find('ia64') > -1:
# For VC++ 9.0, if IA64 support is needed, redirect user
# to Windows SDK 7.0.
# Note: No download link available from Microsoft.
message += ' Get it with "Microsoft Windows SDK 7.0"'
else:
                # For VC++ 9.0, redirect user to VC++ for Python 2.7:
# This redirection link is maintained by Microsoft.
# Contact vspython@microsoft.com if it needs updating.
message += ' Get it from http://aka.ms/vcpython27'
elif version == 10.0:
            # For VC++ 10.0, redirect user to Windows SDK 7.1
message += ' Get it with "Microsoft Windows SDK 7.1": '
message += msdownload % 8279
elif version >= 14.0:
            # For VC++ 14.X, redirect user to the latest Visual C++ Build Tools
message += (' Get it with "Build Tools for Visual Studio": '
r'https://visualstudio.microsoft.com/downloads/')
exc.args = (message, )
class PlatformInfo:
"""
    Current and target architecture information.
Parameters
----------
arch: str
Target architecture.
"""
current_cpu = environ.get('processor_architecture', '').lower()
def __init__(self, arch):
self.arch = arch.lower().replace('x64', 'amd64')
@property
def target_cpu(self):
"""
Return Target CPU architecture.
Return
------
str
Target CPU
"""
return self.arch[self.arch.find('_') + 1:]
def target_is_x86(self):
"""
        Return True if target CPU is x86 32 bits.
Return
------
bool
CPU is x86 32 bits
"""
return self.target_cpu == 'x86'
def current_is_x86(self):
"""
        Return True if current CPU is x86 32 bits.
Return
------
bool
CPU is x86 32 bits
"""
return self.current_cpu == 'x86'
def current_dir(self, hidex86=False, x64=False):
"""
Current platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
str
            subfolder: '\current', or '' (see hidex86 parameter)
"""
return (
'' if (self.current_cpu == 'x86' and hidex86) else
r'\x64' if (self.current_cpu == 'amd64' and x64) else
r'\%s' % self.current_cpu
)
def target_dir(self, hidex86=False, x64=False):
r"""
Target platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
str
            subfolder: '\target', or '' (see hidex86 parameter)
"""
return (
'' if (self.target_cpu == 'x86' and hidex86) else
r'\x64' if (self.target_cpu == 'amd64' and x64) else
r'\%s' % self.target_cpu
)
def cross_dir(self, forcex86=False):
r"""
Cross platform specific subfolder.
Parameters
----------
forcex86: bool
Use 'x86' as current architecture even if current architecture is
not x86.
Return
------
str
subfolder: '' if target architecture is current architecture,
'\current_target' if not.
"""
current = 'x86' if forcex86 else self.current_cpu
return (
'' if self.target_cpu == current else
self.target_dir().replace('\\', '\\%s_' % current)
)
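# Hedged sketch of PlatformInfo; current_dir/cross_dir results depend on the
# PROCESSOR_ARCHITECTURE of the machine running this.
def _demo_platform_info():  # illustrative only
    pi = PlatformInfo('x86_amd64')     # x86 host tools targeting amd64
    # target_cpu == 'amd64'; target_dir(x64=True) == '\\x64'
    return pi.target_cpu, pi.target_dir(x64=True), pi.cross_dir()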
class RegistryInfo:
"""
Microsoft Visual Studio related registry information.
Parameters
----------
platform_info: PlatformInfo
"PlatformInfo" instance.
"""
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
def __init__(self, platform_info):
self.pi = platform_info
@property
def visualstudio(self):
"""
Microsoft Visual Studio root registry key.
Return
------
str
Registry key
"""
return 'VisualStudio'
@property
def sxs(self):
"""
Microsoft Visual Studio SxS registry key.
Return
------
str
Registry key
"""
return join(self.visualstudio, 'SxS')
@property
def vc(self):
"""
Microsoft Visual C++ VC7 registry key.
Return
------
str
Registry key
"""
return join(self.sxs, 'VC7')
@property
def vs(self):
"""
Microsoft Visual Studio VS7 registry key.
Return
------
str
Registry key
"""
return join(self.sxs, 'VS7')
@property
def vc_for_python(self):
"""
Microsoft Visual C++ for Python registry key.
Return
------
str
Registry key
"""
return r'DevDiv\VCForPython'
@property
def microsoft_sdk(self):
"""
Microsoft SDK registry key.
Return
------
str
Registry key
"""
return 'Microsoft SDKs'
@property
def windows_sdk(self):
"""
Microsoft Windows/Platform SDK registry key.
Return
------
str
Registry key
"""
return join(self.microsoft_sdk, 'Windows')
@property
def netfx_sdk(self):
"""
Microsoft .NET Framework SDK registry key.
Return
------
str
Registry key
"""
return join(self.microsoft_sdk, 'NETFXSDK')
@property
def windows_kits_roots(self):
"""
Microsoft Windows Kits Roots registry key.
Return
------
str
Registry key
"""
return r'Windows Kits\Installed Roots'
def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
            Registry key path to look in.
        x86: bool
            Force x86 software registry.
Return
------
str
Registry key
"""
node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
return join('Software', node64, 'Microsoft', key)
def lookup(self, key, name):
"""
Look for values in registry in Microsoft software registry.
Parameters
----------
key: str
            Registry key path to look in.
name: str
Value name to find.
Return
------
str
value
"""
key_read = winreg.KEY_READ
openkey = winreg.OpenKey
closekey = winreg.CloseKey
ms = self.microsoft
for hkey in self.HKEYS:
bkey = None
try:
bkey = openkey(hkey, ms(key), 0, key_read)
except (OSError, IOError):
if not self.pi.current_is_x86():
try:
bkey = openkey(hkey, ms(key, True), 0, key_read)
except (OSError, IOError):
continue
else:
continue
try:
return winreg.QueryValueEx(bkey, name)[0]
except (OSError, IOError):
pass
finally:
if bkey:
closekey(bkey)
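# Hedged sketch: reading a Visual Studio install path from the registry.
# Windows-only; lookup() returns None when the key or value is absent.
def _demo_registry_lookup():  # illustrative only
    ri = RegistryInfo(PlatformInfo('x86'))
    return ri.lookup(ri.vs, '14.0')    # e.g. the VS 2015 install dir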
class SystemInfo:
"""
Microsoft Windows and Visual Studio related system information.
Parameters
----------
registry_info: RegistryInfo
"RegistryInfo" instance.
vc_ver: float
Required Microsoft Visual C++ version.
"""
    # Variables and properties in this class use the original CamelCase names
    # from Microsoft source files for easier comparison.
WinDir = environ.get('WinDir', '')
ProgramFiles = environ.get('ProgramFiles', '')
ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
def __init__(self, registry_info, vc_ver=None):
self.ri = registry_info
self.pi = self.ri.pi
self.known_vs_paths = self.find_programdata_vs_vers()
# Except for VS15+, VC version is aligned with VS version
self.vs_ver = self.vc_ver = (
vc_ver or self._find_latest_available_vs_ver())
def _find_latest_available_vs_ver(self):
"""
Find the latest VC version
Return
------
float
version
"""
reg_vc_vers = self.find_reg_vs_vers()
if not (reg_vc_vers or self.known_vs_paths):
raise distutils.errors.DistutilsPlatformError(
'No Microsoft Visual C++ version found')
vc_vers = set(reg_vc_vers)
vc_vers.update(self.known_vs_paths)
return sorted(vc_vers)[-1]
def find_reg_vs_vers(self):
"""
Find Microsoft Visual Studio versions available in registry.
Return
------
list of float
Versions
"""
ms = self.ri.microsoft
vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
vs_vers = []
for hkey in self.ri.HKEYS:
for key in vckeys:
try:
bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
except (OSError, IOError):
continue
with bkey:
subkeys, values, _ = winreg.QueryInfoKey(bkey)
for i in range(values):
try:
ver = float(winreg.EnumValue(bkey, i)[0])
if ver not in vs_vers:
vs_vers.append(ver)
except ValueError:
pass
for i in range(subkeys):
try:
ver = float(winreg.EnumKey(bkey, i))
if ver not in vs_vers:
vs_vers.append(ver)
except ValueError:
pass
return sorted(vs_vers)
def find_programdata_vs_vers(self):
r"""
        Find Visual Studio 2017+ versions from information in
"C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances".
Return
------
dict
float version as key, path as value.
"""
vs_versions = {}
instances_dir = \
r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances'
try:
hashed_names = listdir(instances_dir)
except (OSError, IOError):
            # Directory does not exist with every Visual Studio version
return vs_versions
for name in hashed_names:
try:
# Get VS installation path from "state.json" file
state_path = join(instances_dir, name, 'state.json')
with open(state_path, 'rt', encoding='utf-8') as state_file:
state = json.load(state_file)
vs_path = state['installationPath']
# Raises OSError if this VS installation does not contain VC
listdir(join(vs_path, r'VC\Tools\MSVC'))
# Store version and path
vs_versions[self._as_float_version(
state['installationVersion'])] = vs_path
except (OSError, IOError, KeyError):
# Skip if "state.json" file is missing or bad format
continue
return vs_versions
@staticmethod
def _as_float_version(version):
"""
Return a string version as a simplified float version (major.minor)
Parameters
----------
version: str
Version.
Return
------
float
version
"""
return float('.'.join(version.split('.')[:2]))
@property
def VSInstallDir(self):
"""
Microsoft Visual Studio directory.
Return
------
str
path
"""
# Default path
default = join(self.ProgramFilesx86,
'Microsoft Visual Studio %0.1f' % self.vs_ver)
        # Try to get the path from the registry; if that fails, use the default
return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default
@property
def VCInstallDir(self):
"""
Microsoft Visual C++ directory.
Return
------
str
path
"""
path = self._guess_vc() or self._guess_vc_legacy()
if not isdir(path):
msg = 'Microsoft Visual C++ directory not found'
raise distutils.errors.DistutilsPlatformError(msg)
return path
def _guess_vc(self):
"""
Locate Visual C++ for VS2017+.
Return
------
str
path
"""
if self.vs_ver <= 14.0:
return ''
try:
# First search in known VS paths
vs_dir = self.known_vs_paths[self.vs_ver]
except KeyError:
# Else, search with path from registry
vs_dir = self.VSInstallDir
guess_vc = join(vs_dir, r'VC\Tools\MSVC')
        # Subdir whose name is the exact VC version
try:
# Update the VC version with real one instead of VS version
vc_ver = listdir(guess_vc)[-1]
self.vc_ver = self._as_float_version(vc_ver)
return join(guess_vc, vc_ver)
except (OSError, IOError, IndexError):
return ''
def _guess_vc_legacy(self):
"""
Locate Visual C++ for versions prior to 2017.
Return
------
str
path
"""
default = join(self.ProgramFilesx86,
r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver)
# Try to get "VC++ for Python" path from registry as default path
reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver)
python_vc = self.ri.lookup(reg_path, 'installdir')
default_vc = join(python_vc, 'VC') if python_vc else default
        # Try to get the path from the registry; if that fails, use the default
return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc
@property
def WindowsSdkVersion(self):
"""
Microsoft Windows SDK versions for specified MSVC++ version.
Return
------
tuple of str
versions
"""
if self.vs_ver <= 9.0:
return '7.0', '6.1', '6.0a'
elif self.vs_ver == 10.0:
return '7.1', '7.0a'
elif self.vs_ver == 11.0:
return '8.0', '8.0a'
elif self.vs_ver == 12.0:
return '8.1', '8.1a'
elif self.vs_ver >= 14.0:
return '10.0', '8.1'
@property
def WindowsSdkLastVersion(self):
"""
Microsoft Windows SDK last version.
Return
------
str
version
"""
return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib'))
@property
def WindowsSdkDir(self):
"""
Microsoft Windows SDK directory.
Return
------
str
path
"""
sdkdir = ''
for ver in self.WindowsSdkVersion:
# Try to get it from registry
loc = join(self.ri.windows_sdk, 'v%s' % ver)
sdkdir = self.ri.lookup(loc, 'installationfolder')
if sdkdir:
break
if not sdkdir or not isdir(sdkdir):
# Try to get "VC++ for Python" version from registry
path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
install_base = self.ri.lookup(path, 'installdir')
if install_base:
sdkdir = join(install_base, 'WinSDK')
if not sdkdir or not isdir(sdkdir):
            # If that fails, use the default new path
for ver in self.WindowsSdkVersion:
intver = ver[:ver.rfind('.')]
path = r'Microsoft SDKs\Windows Kits\%s' % intver
d = join(self.ProgramFiles, path)
if isdir(d):
sdkdir = d
if not sdkdir or not isdir(sdkdir):
            # If that fails, use the default old path
for ver in self.WindowsSdkVersion:
path = r'Microsoft SDKs\Windows\v%s' % ver
d = join(self.ProgramFiles, path)
if isdir(d):
sdkdir = d
if not sdkdir:
            # If that fails, use the Platform SDK
sdkdir = join(self.VCInstallDir, 'PlatformSDK')
return sdkdir
@property
def WindowsSDKExecutablePath(self):
"""
Microsoft Windows SDK executable directory.
Return
------
str
path
"""
# Find WinSDK NetFx Tools registry dir name
if self.vs_ver <= 11.0:
netfxver = 35
arch = ''
else:
netfxver = 40
hidex86 = True if self.vs_ver <= 12.0 else False
arch = self.pi.current_dir(x64=True, hidex86=hidex86)
fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
# list all possibles registry paths
regpaths = []
if self.vs_ver >= 14.0:
for ver in self.NetFxSdkVersion:
regpaths += [join(self.ri.netfx_sdk, ver, fx)]
for ver in self.WindowsSdkVersion:
regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
        # Return the installation folder from the most recent path
for path in regpaths:
execpath = self.ri.lookup(path, 'installationfolder')
if execpath:
return execpath
@property
def FSharpInstallDir(self):
"""
Microsoft Visual F# directory.
Return
------
str
path
"""
path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver)
return self.ri.lookup(path, 'productdir') or ''
@property
def UniversalCRTSdkDir(self):
"""
Microsoft Universal CRT SDK directory.
Return
------
str
path
"""
# Set Kit Roots versions for specified MSVC++ version
vers = ('10', '81') if self.vs_ver >= 14.0 else ()
        # Find the path of the most recent Kit
for ver in vers:
sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
'kitsroot%s' % ver)
if sdkdir:
return sdkdir or ''
@property
def UniversalCRTSdkLastVersion(self):
"""
Microsoft Universal C Runtime SDK last version.
Return
------
str
version
"""
return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib'))
@property
def NetFxSdkVersion(self):
"""
Microsoft .NET Framework SDK versions.
Return
------
tuple of str
versions
"""
# Set FxSdk versions for specified VS version
return (('4.7.2', '4.7.1', '4.7',
'4.6.2', '4.6.1', '4.6',
'4.5.2', '4.5.1', '4.5')
if self.vs_ver >= 14.0 else ())
@property
def NetFxSdkDir(self):
"""
Microsoft .NET Framework SDK directory.
Return
------
str
path
"""
sdkdir = ''
for ver in self.NetFxSdkVersion:
loc = join(self.ri.netfx_sdk, ver)
sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
if sdkdir:
break
return sdkdir
@property
def FrameworkDir32(self):
"""
Microsoft .NET Framework 32bit directory.
Return
------
str
path
"""
# Default path
guess_fw = join(self.WinDir, r'Microsoft.NET\Framework')
        # Try to get the path from the registry; if that fails, use the default
return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
@property
def FrameworkDir64(self):
"""
Microsoft .NET Framework 64bit directory.
Return
------
str
path
"""
# Default path
guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64')
        # Try to get the path from the registry; if that fails, use the default
return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
@property
def FrameworkVersion32(self):
"""
Microsoft .NET Framework 32bit versions.
Return
------
tuple of str
versions
"""
return self._find_dot_net_versions(32)
@property
def FrameworkVersion64(self):
"""
Microsoft .NET Framework 64bit versions.
Return
------
tuple of str
versions
"""
return self._find_dot_net_versions(64)
def _find_dot_net_versions(self, bits):
"""
Find Microsoft .NET Framework versions.
Parameters
----------
bits: int
Platform number of bits: 32 or 64.
Return
------
tuple of str
versions
"""
# Find actual .NET version in registry
reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
# Set .NET versions for specified MSVC++ version
if self.vs_ver >= 12.0:
return ver, 'v4.0'
elif self.vs_ver >= 10.0:
return 'v4.0.30319' if ver.lower()[:2] != 'v4' else ver, 'v3.5'
elif self.vs_ver == 9.0:
return 'v3.5', 'v2.0.50727'
elif self.vs_ver == 8.0:
return 'v3.0', 'v2.0.50727'
@staticmethod
def _use_last_dir_name(path, prefix=''):
"""
Return name of the last dir in path or '' if no dir found.
Parameters
----------
path: str
Use dirs in this path
prefix: str
            Use only dirs starting with this prefix
Return
------
str
name
"""
matching_dirs = (
dir_name
for dir_name in reversed(listdir(path))
if isdir(join(path, dir_name)) and
dir_name.startswith(prefix)
)
return next(matching_dirs, None) or ''
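# Hedged sketch: autodetecting the latest installed Visual Studio.
# Windows-only; raises DistutilsPlatformError when nothing is found.
def _demo_system_info():  # illustrative only
    si = SystemInfo(RegistryInfo(PlatformInfo('x86')))
    return si.vs_ver, si.VSInstallDir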
class EnvironmentInfo:
"""
    Return environment variables for the specified Microsoft Visual C++
    version and platform: Lib, Include, Path and libpath.
    This class is compatible with Microsoft Visual C++ 9.0 to 14.X.
Script created by analysing Microsoft environment configuration files like
"vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
Parameters
----------
arch: str
Target architecture.
vc_ver: float
Required Microsoft Visual C++ version. If not set, autodetect the last
version.
vc_min_ver: float
Minimum Microsoft Visual C++ version.
"""
# Variables and properties in this class use originals CamelCase variables
# names from Microsoft source files for more easy comparison.
def __init__(self, arch, vc_ver=None, vc_min_ver=0):
self.pi = PlatformInfo(arch)
self.ri = RegistryInfo(self.pi)
self.si = SystemInfo(self.ri, vc_ver)
if self.vc_ver < vc_min_ver:
err = 'No suitable Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
@property
def vs_ver(self):
"""
        Microsoft Visual Studio version.
Return
------
float
version
"""
return self.si.vs_ver
@property
def vc_ver(self):
"""
Microsoft Visual C++ version.
Return
------
float
version
"""
return self.si.vc_ver
@property
def VSTools(self):
"""
Microsoft Visual Studio Tools.
Return
------
list of str
paths
"""
paths = [r'Common7\IDE', r'Common7\Tools']
if self.vs_ver >= 14.0:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
paths += [r'Team Tools\Performance Tools']
paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
return [join(self.si.VSInstallDir, path) for path in paths]
@property
def VCIncludes(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Includes.
Return
------
list of str
paths
"""
return [join(self.si.VCInstallDir, 'Include'),
join(self.si.VCInstallDir, r'ATLMFC\Include')]
@property
def VCLibraries(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Libraries.
Return
------
list of str
paths
"""
if self.vs_ver >= 15.0:
arch_subdir = self.pi.target_dir(x64=True)
else:
arch_subdir = self.pi.target_dir(hidex86=True)
paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
if self.vs_ver >= 14.0:
paths += [r'Lib\store%s' % arch_subdir]
return [join(self.si.VCInstallDir, path) for path in paths]
@property
def VCStoreRefs(self):
"""
Microsoft Visual C++ store references Libraries.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0:
return []
return [join(self.si.VCInstallDir, r'Lib\store\references')]
@property
def VCTools(self):
"""
Microsoft Visual C++ Tools.
Return
------
list of str
paths
"""
si = self.si
tools = [join(si.VCInstallDir, 'VCPackages')]
forcex86 = True if self.vs_ver <= 10.0 else False
arch_subdir = self.pi.cross_dir(forcex86)
if arch_subdir:
tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
if self.vs_ver == 14.0:
path = 'Bin%s' % self.pi.current_dir(hidex86=True)
tools += [join(si.VCInstallDir, path)]
elif self.vs_ver >= 15.0:
host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else
r'bin\HostX64%s')
tools += [join(
si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
if self.pi.current_cpu != self.pi.target_cpu:
tools += [join(
si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]
else:
tools += [join(si.VCInstallDir, 'Bin')]
return tools
@property
def OSLibraries(self):
"""
Microsoft Windows SDK Libraries.
Return
------
list of str
paths
"""
if self.vs_ver <= 10.0:
arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
else:
arch_subdir = self.pi.target_dir(x64=True)
lib = join(self.si.WindowsSdkDir, 'lib')
libver = self._sdk_subdir
return [join(lib, '%sum%s' % (libver, arch_subdir))]
@property
def OSIncludes(self):
"""
Microsoft Windows SDK Include.
Return
------
list of str
paths
"""
include = join(self.si.WindowsSdkDir, 'include')
if self.vs_ver <= 10.0:
return [include, join(include, 'gl')]
else:
if self.vs_ver >= 14.0:
sdkver = self._sdk_subdir
else:
sdkver = ''
return [join(include, '%sshared' % sdkver),
join(include, '%sum' % sdkver),
join(include, '%swinrt' % sdkver)]
@property
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths.
Return
------
list of str
paths
"""
ref = join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vs_ver <= 9.0:
libpath += self.OSLibraries
if self.vs_ver >= 11.0:
libpath += [join(ref, r'CommonConfiguration\Neutral')]
if self.vs_ver >= 14.0:
libpath += [
ref,
join(self.si.WindowsSdkDir, 'UnionMetadata'),
join(
ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),
join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),
join(
ref, 'Windows.Networking.Connectivity.WwanContract',
'1.0.0.0'),
join(
self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs',
'%0.1f' % self.vs_ver, 'References', 'CommonConfiguration',
'neutral'),
]
return libpath
@property
def SdkTools(self):
"""
Microsoft Windows SDK Tools.
Return
------
list of str
paths
"""
return list(self._sdk_tools())
def _sdk_tools(self):
"""
Microsoft Windows SDK Tools paths generator.
Return
------
generator of str
paths
"""
if self.vs_ver < 15.0:
bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86'
yield join(self.si.WindowsSdkDir, bin_dir)
if not self.pi.current_is_x86():
arch_subdir = self.pi.current_dir(x64=True)
path = 'Bin%s' % arch_subdir
yield join(self.si.WindowsSdkDir, path)
if self.vs_ver in (10.0, 11.0):
if self.pi.target_is_x86():
arch_subdir = ''
else:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
yield join(self.si.WindowsSdkDir, path)
elif self.vs_ver >= 15.0:
path = join(self.si.WindowsSdkDir, 'Bin')
arch_subdir = self.pi.current_dir(x64=True)
sdkver = self.si.WindowsSdkLastVersion
yield join(path, '%s%s' % (sdkver, arch_subdir))
if self.si.WindowsSDKExecutablePath:
yield self.si.WindowsSDKExecutablePath
@property
def _sdk_subdir(self):
"""
Microsoft Windows SDK version subdir.
Return
------
str
subdir
"""
ucrtver = self.si.WindowsSdkLastVersion
return ('%s\\' % ucrtver) if ucrtver else ''
@property
def SdkSetup(self):
"""
Microsoft Windows SDK Setup.
Return
------
list of str
paths
"""
if self.vs_ver > 9.0:
return []
return [join(self.si.WindowsSdkDir, 'Setup')]
@property
def FxTools(self):
"""
Microsoft .NET Framework Tools.
Return
------
list of str
paths
"""
pi = self.pi
si = self.si
if self.vs_ver <= 10.0:
include32 = True
include64 = not pi.target_is_x86() and not pi.current_is_x86()
else:
include32 = pi.target_is_x86() or pi.current_is_x86()
include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
tools = []
if include32:
tools += [join(si.FrameworkDir32, ver)
for ver in si.FrameworkVersion32]
if include64:
tools += [join(si.FrameworkDir64, ver)
for ver in si.FrameworkVersion64]
return tools
@property
def NetFxSDKLibraries(self):
"""
Microsoft .Net Framework SDK Libraries.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
return []
arch_subdir = self.pi.target_dir(x64=True)
return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
@property
def NetFxSDKIncludes(self):
"""
Microsoft .Net Framework SDK Includes.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
return []
return [join(self.si.NetFxSdkDir, r'include\um')]
@property
def VsTDb(self):
"""
Microsoft Visual Studio Team System Database.
Return
------
list of str
paths
"""
return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
@property
def MSBuild(self):
"""
Microsoft Build Engine.
Return
------
list of str
paths
"""
if self.vs_ver < 12.0:
return []
elif self.vs_ver < 15.0:
base_path = self.si.ProgramFilesx86
arch_subdir = self.pi.current_dir(hidex86=True)
else:
base_path = self.si.VSInstallDir
arch_subdir = ''
path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir)
build = [join(base_path, path)]
if self.vs_ver >= 15.0:
# Add Roslyn C# & Visual Basic Compiler
build += [join(base_path, path, 'Roslyn')]
return build
@property
def HTMLHelpWorkshop(self):
"""
Microsoft HTML Help Workshop.
Return
------
list of str
paths
"""
if self.vs_ver < 11.0:
return []
return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
@property
def UCRTLibraries(self):
"""
Microsoft Universal C Runtime SDK Libraries.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0:
return []
arch_subdir = self.pi.target_dir(x64=True)
lib = join(self.si.UniversalCRTSdkDir, 'lib')
ucrtver = self._ucrt_subdir
return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
@property
def UCRTIncludes(self):
"""
Microsoft Universal C Runtime SDK Include.
Return
------
list of str
paths
"""
if self.vs_ver < 14.0:
return []
include = join(self.si.UniversalCRTSdkDir, 'include')
return [join(include, '%sucrt' % self._ucrt_subdir)]
@property
def _ucrt_subdir(self):
"""
Microsoft Universal C Runtime SDK version subdir.
Return
------
str
subdir
"""
ucrtver = self.si.UniversalCRTSdkLastVersion
return ('%s\\' % ucrtver) if ucrtver else ''
@property
def FSharp(self):
"""
Microsoft Visual F#.
Return
------
list of str
paths
"""
        # Visual F# ships only with VS 2012 (11.0) and 2013 (12.0); the
        # original chained test (11.0 > vs_ver > 12.0) could never be true.
        if not (11.0 <= self.vs_ver <= 12.0):
            return []
return [self.si.FSharpInstallDir]
@property
def VCRuntimeRedist(self):
"""
Microsoft Visual C++ runtime redistributable dll.
Return
------
str
path
"""
vcruntime = 'vcruntime%d0.dll' % self.vc_ver
arch_subdir = self.pi.target_dir(x64=True).strip('\\')
# Installation prefixes candidates
prefixes = []
tools_path = self.si.VCInstallDir
redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist'))
if isdir(redist_path):
# Redist version may not be exactly the same as tools
redist_path = join(redist_path, listdir(redist_path)[-1])
prefixes += [redist_path, join(redist_path, 'onecore')]
prefixes += [join(tools_path, 'redist')] # VS14 legacy path
# CRT directory
crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10),
                    # Sometimes stored in a directory named after the VS
                    # version instead of the VC version
'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10))
# vcruntime path
for prefix, crt_dir in itertools.product(prefixes, crt_dirs):
path = join(prefix, arch_subdir, crt_dir, vcruntime)
if isfile(path):
return path
def return_env(self, exists=True):
"""
Return environment dict.
Parameters
----------
exists: bool
            If True, only return existing paths.
Return
------
dict
environment
"""
env = dict(
include=self._build_paths('include',
[self.VCIncludes,
self.OSIncludes,
self.UCRTIncludes,
self.NetFxSDKIncludes],
exists),
lib=self._build_paths('lib',
[self.VCLibraries,
self.OSLibraries,
self.FxTools,
self.UCRTLibraries,
self.NetFxSDKLibraries],
exists),
libpath=self._build_paths('libpath',
[self.VCLibraries,
self.FxTools,
self.VCStoreRefs,
self.OSLibpath],
exists),
path=self._build_paths('path',
[self.VCTools,
self.VSTools,
self.VsTDb,
self.SdkTools,
self.SdkSetup,
self.FxTools,
self.MSBuild,
self.HTMLHelpWorkshop,
self.FSharp],
exists),
)
        # VCRuntimeRedist may be None when no redistributable DLL is found
        vcruntime = self.VCRuntimeRedist
        if self.vs_ver >= 14 and vcruntime and isfile(vcruntime):
            env['py_vcruntime_redist'] = vcruntime
return env
def _build_paths(self, name, spec_path_lists, exists):
"""
Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved.
Parameters
----------
name: str
Environment variable name
spec_path_lists: list of str
Paths
exists: bool
            If True, only return existing paths.
Return
------
str
Pathsep-separated paths
"""
# flatten spec_path_lists
spec_paths = itertools.chain.from_iterable(spec_path_lists)
env_paths = environ.get(name, '').split(pathsep)
paths = itertools.chain(spec_paths, env_paths)
extant_paths = list(filter(isdir, paths)) if exists else paths
if not extant_paths:
msg = "%s environment variable is empty" % name.upper()
raise distutils.errors.DistutilsPlatformError(msg)
unique_paths = self._unique_everseen(extant_paths)
return pathsep.join(unique_paths)
# from Python docs
@staticmethod
def _unique_everseen(iterable, key=None):
"""
List unique elements, preserving order.
Remember all elements ever seen.
_unique_everseen('AAAABBBCCDAABBB') --> A B C D
_unique_everseen('ABBCcAD', str.lower) --> A B C D
"""
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
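# Hedged sketch: building the complete MSVC environment for an amd64 target,
# which is what msvc9_query_vcvarsall falls back to. Windows-only.
def _demo_environment_info():  # illustrative only
    env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
    return sorted(env)   # typically: include, lib, libpath, path, ...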
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/dist.py
|
# -*- coding: utf-8 -*-
__all__ = ['Distribution']
import io
import sys
import re
import os
import warnings
import numbers
import distutils.log
import distutils.core
import distutils.cmd
import distutils.dist
from distutils.util import strtobool
from distutils.debug import DEBUG
from distutils.fancy_getopt import translate_longopt
import itertools
from collections import defaultdict
from email import message_from_file
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from distutils.util import rfc822_escape
from distutils.version import StrictVersion
from setuptools.extern import six
from setuptools.extern import packaging
from setuptools.extern import ordered_set
from setuptools.extern.six.moves import map, filter, filterfalse
from . import SetuptoolsDeprecationWarning
import setuptools
from setuptools import windows_support
from setuptools.monkey import get_unpatched
from setuptools.config import parse_configuration
import pkg_resources
__import__('setuptools.extern.packaging.specifiers')
__import__('setuptools.extern.packaging.version')
def _get_unpatched(cls):
warnings.warn("Do not call this function", DistDeprecationWarning)
return get_unpatched(cls)
def get_metadata_version(self):
mv = getattr(self, 'metadata_version', None)
if mv is None:
if self.long_description_content_type or self.provides_extras:
mv = StrictVersion('2.1')
elif (self.maintainer is not None or
self.maintainer_email is not None or
getattr(self, 'python_requires', None) is not None or
self.project_urls):
mv = StrictVersion('1.2')
elif (self.provides or self.requires or self.obsoletes or
self.classifiers or self.download_url):
mv = StrictVersion('1.1')
else:
mv = StrictVersion('1.0')
self.metadata_version = mv
return mv
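# Hedged sketch: a bare-bones distribution reports metadata version 1.0;
# adding e.g. python_requires or project_urls bumps it per the rules above.
def _demo_metadata_version():  # illustrative only
    dist = Distribution(dict(name='demo', version='0.1'))
    return dist.metadata.get_metadata_version()  # StrictVersion('1.0')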
def read_pkg_file(self, file):
"""Reads the metadata values from a file object."""
msg = message_from_file(file)
def _read_field(name):
value = msg[name]
if value == 'UNKNOWN':
return None
return value
def _read_list(name):
values = msg.get_all(name, None)
if values == []:
return None
return values
self.metadata_version = StrictVersion(msg['metadata-version'])
self.name = _read_field('name')
self.version = _read_field('version')
self.description = _read_field('summary')
# we are filling author only.
self.author = _read_field('author')
self.maintainer = None
self.author_email = _read_field('author-email')
self.maintainer_email = None
self.url = _read_field('home-page')
self.license = _read_field('license')
if 'download-url' in msg:
self.download_url = _read_field('download-url')
else:
self.download_url = None
self.long_description = _read_field('description')
self.description = _read_field('summary')
if 'keywords' in msg:
self.keywords = _read_field('keywords').split(',')
self.platforms = _read_list('platform')
self.classifiers = _read_list('classifier')
# PEP 314 - these fields only exist in 1.1
if self.metadata_version == StrictVersion('1.1'):
self.requires = _read_list('requires')
self.provides = _read_list('provides')
self.obsoletes = _read_list('obsoletes')
else:
self.requires = None
self.provides = None
self.obsoletes = None
# Based on Python 3.5 version
def write_pkg_file(self, file):
"""Write the PKG-INFO format data to a file object.
"""
version = self.get_metadata_version()
if six.PY2:
def write_field(key, value):
file.write("%s: %s\n" % (key, self._encode_field(value)))
else:
def write_field(key, value):
file.write("%s: %s\n" % (key, value))
write_field('Metadata-Version', str(version))
write_field('Name', self.get_name())
write_field('Version', self.get_version())
write_field('Summary', self.get_description())
write_field('Home-page', self.get_url())
if version < StrictVersion('1.2'):
write_field('Author', self.get_contact())
write_field('Author-email', self.get_contact_email())
else:
optional_fields = (
('Author', 'author'),
('Author-email', 'author_email'),
('Maintainer', 'maintainer'),
('Maintainer-email', 'maintainer_email'),
)
for field, attr in optional_fields:
attr_val = getattr(self, attr)
if attr_val is not None:
write_field(field, attr_val)
write_field('License', self.get_license())
if self.download_url:
write_field('Download-URL', self.download_url)
for project_url in self.project_urls.items():
write_field('Project-URL', '%s, %s' % project_url)
long_desc = rfc822_escape(self.get_long_description())
write_field('Description', long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
write_field('Keywords', keywords)
if version >= StrictVersion('1.2'):
for platform in self.get_platforms():
write_field('Platform', platform)
else:
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
# Setuptools specific for PEP 345
if hasattr(self, 'python_requires'):
write_field('Requires-Python', self.python_requires)
# PEP 566
if self.long_description_content_type:
write_field(
'Description-Content-Type',
self.long_description_content_type
)
if self.provides_extras:
for extra in self.provides_extras:
write_field('Provides-Extra', extra)
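# Hedged sketch: serializing the same metadata to PKG-INFO text in memory
# (exact output may vary slightly across Python versions).
def _demo_write_pkg_file():  # illustrative only
    dist = Distribution(dict(name='demo', version='0.1'))
    buf = io.StringIO()
    dist.metadata.write_pkg_file(buf)
    return buf.getvalue().splitlines()[0]  # 'Metadata-Version: 1.0'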
sequence = tuple, list
def check_importable(dist, attr, value):
try:
ep = pkg_resources.EntryPoint.parse('x=' + value)
assert not ep.extras
except (TypeError, ValueError, AttributeError, AssertionError) as e:
raise DistutilsSetupError(
"%r must be importable 'module:attrs' string (got %r)"
% (attr, value)
) from e
def assert_string_list(dist, attr, value):
"""Verify that value is a string list"""
try:
# verify that value is a list or tuple to exclude unordered
# or single-use iterables
assert isinstance(value, (list, tuple))
# verify that elements of value are strings
assert ''.join(value) != value
except (TypeError, ValueError, AttributeError, AssertionError) as e:
raise DistutilsSetupError(
"%r must be a list of strings (got %r)" % (attr, value)
) from e
def check_nsp(dist, attr, value):
"""Verify that namespace packages are valid"""
ns_packages = value
assert_string_list(dist, attr, ns_packages)
for nsp in ns_packages:
if not dist.has_contents_for(nsp):
raise DistutilsSetupError(
"Distribution contains no modules or packages for " +
"namespace package %r" % nsp
)
parent, sep, child = nsp.rpartition('.')
if parent and parent not in ns_packages:
distutils.log.warn(
"WARNING: %r is declared as a package namespace, but %r"
" is not: please correct this in setup.py", nsp, parent
)
def check_extras(dist, attr, value):
"""Verify that extras_require mapping is valid"""
try:
list(itertools.starmap(_check_extra, value.items()))
except (TypeError, ValueError, AttributeError) as e:
raise DistutilsSetupError(
"'extras_require' must be a dictionary whose values are "
"strings or lists of strings containing valid project/version "
"requirement specifiers."
) from e
def _check_extra(extra, reqs):
name, sep, marker = extra.partition(':')
if marker and pkg_resources.invalid_marker(marker):
raise DistutilsSetupError("Invalid environment marker: " + marker)
list(pkg_resources.parse_requirements(reqs))
def assert_bool(dist, attr, value):
"""Verify that value is True, False, 0, or 1"""
if bool(value) != value:
tmpl = "{attr!r} must be a boolean value (got {value!r})"
raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
def check_requirements(dist, attr, value):
"""Verify that install_requires is a valid requirements list"""
try:
list(pkg_resources.parse_requirements(value))
if isinstance(value, (dict, set)):
raise TypeError("Unordered types are not allowed")
except (TypeError, ValueError) as error:
tmpl = (
"{attr!r} must be a string or list of strings "
"containing valid project/version requirement specifiers; {error}"
)
raise DistutilsSetupError(
tmpl.format(attr=attr, error=error)
) from error
def check_specifier(dist, attr, value):
"""Verify that value is a valid version specifier"""
try:
packaging.specifiers.SpecifierSet(value)
except packaging.specifiers.InvalidSpecifier as error:
tmpl = (
"{attr!r} must be a string "
"containing valid version specifiers; {error}"
)
raise DistutilsSetupError(
tmpl.format(attr=attr, error=error)
) from error
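# Hedged sketch: these validators raise DistutilsSetupError on bad input.
def _demo_check_specifier():  # illustrative only
    check_specifier(None, 'python_requires', '>=3.5')   # passes silently
    try:
        check_specifier(None, 'python_requires', 'not a spec')
    except DistutilsSetupError as err:
        return str(err)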
def check_entry_points(dist, attr, value):
"""Verify that entry_points map is parseable"""
try:
pkg_resources.EntryPoint.parse_map(value)
except ValueError as e:
raise DistutilsSetupError(e) from e
def check_test_suite(dist, attr, value):
if not isinstance(value, six.string_types):
raise DistutilsSetupError("test_suite must be a string")
def check_package_data(dist, attr, value):
"""Verify that value is a dictionary of package names to glob lists"""
if not isinstance(value, dict):
raise DistutilsSetupError(
"{!r} must be a dictionary mapping package names to lists of "
"string wildcard patterns".format(attr))
for k, v in value.items():
if not isinstance(k, six.string_types):
raise DistutilsSetupError(
"keys of {!r} dict must be strings (got {!r})"
.format(attr, k)
)
assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
def check_packages(dist, attr, value):
for pkgname in value:
if not re.match(r'\w+(\.\w+)*', pkgname):
distutils.log.warn(
"WARNING: %r not a valid package name; please use only "
".-separated package names in setup.py", pkgname
)
_Distribution = get_unpatched(distutils.core.Distribution)
class Distribution(_Distribution):
"""Distribution with support for tests and package data
This is an enhanced version of 'distutils.dist.Distribution' that
effectively adds the following new optional keyword arguments to 'setup()':
'install_requires' -- a string or sequence of strings specifying project
versions that the distribution requires when installed, in the format
used by 'pkg_resources.require()'. They will be installed
automatically when the package is installed. If you wish to use
packages that are not available in PyPI, or want to give your users an
alternate download location, you can add a 'find_links' option to the
'[easy_install]' section of your project's 'setup.cfg' file, and then
setuptools will scan the listed web pages for links that satisfy the
requirements.
'extras_require' -- a dictionary mapping names of optional "extras" to the
additional requirement(s) that using those extras incurs. For example,
this::
extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
indicates that the distribution can optionally provide an extra
capability called "reST", but it can only be used if docutils and
reSTedit are installed. If the user installs your package using
EasyInstall and requests one of your extras, the corresponding
additional requirements will be installed if needed.
'test_suite' -- the name of a test suite to run for the 'test' command.
If the user runs 'python setup.py test', the package will be installed,
and the named test suite will be run. The format is the same as
would be used on a 'unittest.py' command line. That is, it is the
dotted name of an object to import and call to generate a test suite.
'package_data' -- a dictionary mapping package names to lists of filenames
or globs to use to find data files contained in the named packages.
If the dictionary has filenames or globs listed under '""' (the empty
string), those names will be searched for in every package, in addition
to any names for the specific package. Data files found using these
names/globs will be installed along with the package, in the same
location as the package. Note that globs are allowed to reference
the contents of non-package subdirectories, as long as you use '/' as
a path separator. (Globs are automatically converted to
platform-specific paths at runtime.)
In addition to these new keywords, this class also has several new methods
for manipulating the distribution's contents. For example, the 'include()'
and 'exclude()' methods can be thought of as in-place add and subtract
commands that add or remove packages, modules, extensions, and so on from
the distribution.
"""
_DISTUTILS_UNSUPPORTED_METADATA = {
'long_description_content_type': None,
'project_urls': dict,
'provides_extras': ordered_set.OrderedSet,
'license_files': ordered_set.OrderedSet,
}
_patched_dist = None
def patch_missing_pkg_info(self, attrs):
# Fake up a replacement for the data that would normally come from
# PKG-INFO, but which might not yet be built if this is a fresh
# checkout.
#
if not attrs or 'name' not in attrs or 'version' not in attrs:
return
key = pkg_resources.safe_name(str(attrs['name'])).lower()
dist = pkg_resources.working_set.by_key.get(key)
if dist is not None and not dist.has_metadata('PKG-INFO'):
dist._version = pkg_resources.safe_version(str(attrs['version']))
self._patched_dist = dist
def __init__(self, attrs=None):
have_package_data = hasattr(self, "package_data")
if not have_package_data:
self.package_data = {}
attrs = attrs or {}
self.dist_files = []
# Filter-out setuptools' specific options.
self.src_root = attrs.pop("src_root", None)
self.patch_missing_pkg_info(attrs)
self.dependency_links = attrs.pop('dependency_links', [])
self.setup_requires = attrs.pop('setup_requires', [])
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
vars(self).setdefault(ep.name, None)
_Distribution.__init__(self, {
k: v for k, v in attrs.items()
if k not in self._DISTUTILS_UNSUPPORTED_METADATA
})
# Fill-in missing metadata fields not supported by distutils.
# Note some fields may have been set by other tools (e.g. pbr)
# above; they are taken preferentially to setup() arguments
for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
for source in self.metadata.__dict__, attrs:
if option in source:
value = source[option]
break
else:
value = default() if default else None
setattr(self.metadata, option, value)
self.metadata.version = self._normalize_version(
self._validate_version(self.metadata.version))
self._finalize_requires()
@staticmethod
def _normalize_version(version):
if isinstance(version, setuptools.sic) or version is None:
return version
normalized = str(packaging.version.Version(version))
if version != normalized:
tmpl = "Normalizing '{version}' to '{normalized}'"
warnings.warn(tmpl.format(**locals()))
return normalized
return version
@staticmethod
def _validate_version(version):
if isinstance(version, numbers.Number):
# Some people apparently take "version number" too literally :)
version = str(version)
if version is not None:
try:
packaging.version.Version(version)
except (packaging.version.InvalidVersion, TypeError):
warnings.warn(
"The version specified (%r) is an invalid version, this "
"may not work as expected with newer versions of "
"setuptools, pip, and PyPI. Please see PEP 440 for more "
"details." % version
)
return setuptools.sic(version)
return version
def _finalize_requires(self):
"""
Set `metadata.python_requires` and fix environment markers
in `install_requires` and `extras_require`.
"""
if getattr(self, 'python_requires', None):
self.metadata.python_requires = self.python_requires
if getattr(self, 'extras_require', None):
for extra in self.extras_require.keys():
# Since this gets called multiple times at points where the
# keys have become 'converted' extras, ensure that we are only
# truly adding extras we haven't seen before here.
extra = extra.split(':')[0]
if extra:
self.metadata.provides_extras.add(extra)
self._convert_extras_requirements()
self._move_install_requirements_markers()
def _convert_extras_requirements(self):
"""
Convert requirements in `extras_require` of the form
`"extra": ["barbazquux; {marker}"]` to
`"extra:{marker}": ["barbazquux"]`.
"""
spec_ext_reqs = getattr(self, 'extras_require', None) or {}
self._tmp_extras_require = defaultdict(list)
for section, v in spec_ext_reqs.items():
# Do not strip empty sections.
self._tmp_extras_require[section]
for r in pkg_resources.parse_requirements(v):
suffix = self._suffix_for(r)
self._tmp_extras_require[section + suffix].append(r)
@staticmethod
def _suffix_for(req):
"""
For a requirement, return the 'extras_require' suffix for
that requirement.
"""
return ':' + str(req.marker) if req.marker else ''
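    # Illustrative sketch (comments only, not original module code) of
    # the rewrite performed by _convert_extras_requirements() above:
    #
    #     {'test': ['mock; python_version < "3.0"']}
    #
    # conceptually becomes
    #
    #     {'test:python_version < "3.0"': [<Requirement: mock>]}
    #
    # _clean_req() later strips the marker from the requirement itself,
    # since the marker now lives in the section name.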
def _move_install_requirements_markers(self):
"""
Move requirements in `install_requires` that are using environment
markers to `extras_require`.
"""
# divide the install_requires into two sets, simple ones still
# handled by install_requires and more complex ones handled
# by extras_require.
def is_simple_req(req):
return not req.marker
spec_inst_reqs = getattr(self, 'install_requires', None) or ()
inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
simple_reqs = filter(is_simple_req, inst_reqs)
complex_reqs = filterfalse(is_simple_req, inst_reqs)
self.install_requires = list(map(str, simple_reqs))
for r in complex_reqs:
self._tmp_extras_require[':' + str(r.marker)].append(r)
self.extras_require = dict(
(k, [str(r) for r in map(self._clean_req, v)])
for k, v in self._tmp_extras_require.items()
)
def _clean_req(self, req):
"""
Given a Requirement, remove environment markers and return it.
"""
req.marker = None
return req
def _parse_config_files(self, filenames=None):
"""
Adapted from distutils.dist.Distribution.parse_config_files,
this method provides the same functionality in subtly-improved
ways.
"""
from setuptools.extern.six.moves.configparser import ConfigParser
# Ignore install directory options if we have a venv
if not six.PY2 and sys.prefix != sys.base_prefix:
ignore_options = [
'install-base', 'install-platbase', 'install-lib',
'install-platlib', 'install-purelib', 'install-headers',
'install-scripts', 'install-data', 'prefix', 'exec-prefix',
'home', 'user', 'root']
else:
ignore_options = []
ignore_options = frozenset(ignore_options)
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser()
for filename in filenames:
with io.open(filename, encoding='utf-8') as reader:
if DEBUG:
self.announce(" reading {filename}".format(**locals()))
(parser.readfp if six.PY2 else parser.read_file)(reader)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != '__name__' and opt not in ignore_options:
val = self._try_str(parser.get(section, opt))
opt = opt.replace('-', '_')
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if 'global' in self.command_options:
for (opt, (src, val)) in self.command_options['global'].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ('verbose', 'dry_run'): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as e:
raise DistutilsOptionError(e) from e
@staticmethod
def _try_str(val):
"""
On Python 2, much of distutils relies on string values being of
type 'str' (bytes) and not unicode text. If the value can be safely
encoded to bytes using the default encoding, prefer that.
Why the default encoding? Because that value can be implicitly
decoded back to text if needed.
Ref #1653
"""
if not six.PY2:
return val
try:
return val.encode()
except UnicodeEncodeError:
pass
return val
def _set_command_options(self, command_obj, option_dict=None):
"""
Set the options for 'command_obj' from 'option_dict'. Basically
this means copying elements of a dictionary ('option_dict') to
attributes of an instance ('command').
'command_obj' must be a Command instance. If 'option_dict' is not
supplied, uses the standard option dictionary for this command
(from 'self.command_options').
(Adopted from distutils.dist.Distribution._set_command_options)
"""
command_name = command_obj.get_command_name()
if option_dict is None:
option_dict = self.get_option_dict(command_name)
if DEBUG:
self.announce(" setting options for '%s' command:" % command_name)
for (option, (source, value)) in option_dict.items():
if DEBUG:
self.announce(" %s = %s (from %s)" % (option, value,
source))
try:
bool_opts = [translate_longopt(o)
for o in command_obj.boolean_options]
except AttributeError:
bool_opts = []
try:
neg_opt = command_obj.negative_opt
except AttributeError:
neg_opt = {}
try:
is_string = isinstance(value, six.string_types)
if option in neg_opt and is_string:
setattr(command_obj, neg_opt[option], not strtobool(value))
elif option in bool_opts and is_string:
setattr(command_obj, option, strtobool(value))
elif hasattr(command_obj, option):
setattr(command_obj, option, value)
else:
raise DistutilsOptionError(
"error in %s: command '%s' has no such option '%s'"
% (source, command_name, option))
except ValueError as e:
raise DistutilsOptionError(e) from e
def parse_config_files(self, filenames=None, ignore_option_errors=False):
"""Parses configuration files from various levels
and loads configuration.
"""
self._parse_config_files(filenames=filenames)
parse_configuration(self, self.command_options,
ignore_option_errors=ignore_option_errors)
self._finalize_requires()
def fetch_build_eggs(self, requires):
"""Resolve pre-setup requirements"""
resolved_dists = pkg_resources.working_set.resolve(
pkg_resources.parse_requirements(requires),
installer=self.fetch_build_egg,
replace_conflicting=True,
)
for dist in resolved_dists:
pkg_resources.working_set.add(dist, replace=True)
return resolved_dists
def finalize_options(self):
"""
Allow plugins to apply arbitrary operations to the
distribution. Each hook may optionally define an 'order'
to influence the order of execution. Smaller numbers
go first and the default is 0.
"""
group = 'setuptools.finalize_distribution_options'
def by_order(hook):
return getattr(hook, 'order', 0)
eps = map(lambda e: e.load(), pkg_resources.iter_entry_points(group))
for ep in sorted(eps, key=by_order):
ep(self)
def _finalize_setup_keywords(self):
for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
value = getattr(self, ep.name, None)
if value is not None:
ep.require(installer=self.fetch_build_egg)
ep.load()(self, ep.name, value)
def _finalize_2to3_doctests(self):
if getattr(self, 'convert_2to3_doctests', None):
# XXX may convert to set here when we can rely on set being builtin
self.convert_2to3_doctests = [
os.path.abspath(p)
for p in self.convert_2to3_doctests
]
else:
self.convert_2to3_doctests = []
def get_egg_cache_dir(self):
egg_cache_dir = os.path.join(os.curdir, '.eggs')
if not os.path.exists(egg_cache_dir):
os.mkdir(egg_cache_dir)
windows_support.hide_file(egg_cache_dir)
readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
with open(readme_txt_filename, 'w') as f:
f.write('This directory contains eggs that were downloaded '
'by setuptools to build, test, and run plug-ins.\n\n')
f.write('This directory caches those eggs to prevent '
'repeated downloads.\n\n')
f.write('However, it is safe to delete this directory.\n\n')
return egg_cache_dir
def fetch_build_egg(self, req):
"""Fetch an egg needed for building"""
from setuptools.installer import fetch_build_egg
return fetch_build_egg(self, req)
def get_command_class(self, command):
"""Pluggable version of get_command_class()"""
if command in self.cmdclass:
return self.cmdclass[command]
eps = pkg_resources.iter_entry_points('distutils.commands', command)
for ep in eps:
ep.require(installer=self.fetch_build_egg)
self.cmdclass[command] = cmdclass = ep.load()
return cmdclass
else:
return _Distribution.get_command_class(self, command)
def print_commands(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.print_commands(self)
def get_command_list(self):
for ep in pkg_resources.iter_entry_points('distutils.commands'):
if ep.name not in self.cmdclass:
# don't require extras as the commands won't be invoked
cmdclass = ep.resolve()
self.cmdclass[ep.name] = cmdclass
return _Distribution.get_command_list(self)
def include(self, **attrs):
"""Add items to distribution that are named in keyword arguments
For example, 'dist.include(py_modules=["x"])' would add 'x' to
the distribution's 'py_modules' attribute, if it was not already
there.
Currently, this method only supports inclusion for attributes that are
lists or tuples. If you need to add support for adding to other
attributes in this or a subclass, you can add an '_include_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
will try to call 'dist._include_foo({"bar":"baz"})', which can then
handle whatever special inclusion logic is needed.
"""
for k, v in attrs.items():
include = getattr(self, '_include_' + k, None)
if include:
include(v)
else:
self._include_misc(k, v)
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
]
def has_contents_for(self, package):
"""Return true if 'exclude_package(package)' would do something"""
pfx = package + '.'
for p in self.iter_distribution_names():
if p == package or p.startswith(pfx):
return True
def _exclude_misc(self, name, value):
"""Handle 'exclude()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list or tuple (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError as e:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
) from e
if old is not None and not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
elif old:
setattr(self, name, [item for item in old if item not in value])
def _include_misc(self, name, value):
"""Handle 'include()' for list/tuple attrs without a special handler"""
if not isinstance(value, sequence):
raise DistutilsSetupError(
"%s: setting must be a list (%r)" % (name, value)
)
try:
old = getattr(self, name)
except AttributeError as e:
raise DistutilsSetupError(
"%s: No such distribution setting" % name
) from e
if old is None:
setattr(self, name, value)
elif not isinstance(old, sequence):
raise DistutilsSetupError(
name + ": this setting cannot be changed via include/exclude"
)
else:
new = [item for item in value if item not in old]
setattr(self, name, old + new)
def exclude(self, **attrs):
"""Remove items from distribution that are named in keyword arguments
For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
the distribution's 'py_modules' attribute. Excluding packages uses
the 'exclude_package()' method, so all of the package's contained
packages, modules, and extensions are also excluded.
Currently, this method only supports exclusion from attributes that are
lists or tuples. If you need to add support for excluding from other
attributes in this or a subclass, you can add an '_exclude_X' method,
where 'X' is the name of the attribute. The method will be called with
the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
handle whatever special exclusion logic is needed.
"""
for k, v in attrs.items():
exclude = getattr(self, '_exclude_' + k, None)
if exclude:
exclude(v)
else:
self._exclude_misc(k, v)
def _exclude_packages(self, packages):
if not isinstance(packages, sequence):
raise DistutilsSetupError(
"packages: setting must be a list or tuple (%r)" % (packages,)
)
list(map(self.exclude_package, packages))
def _parse_command_opts(self, parser, args):
# Remove --with-X/--without-X options when processing command args
self.global_options = self.__class__.global_options
self.negative_opt = self.__class__.negative_opt
# First, expand any aliases
command = args[0]
aliases = self.get_option_dict('aliases')
while command in aliases:
src, alias = aliases[command]
del aliases[command] # ensure each alias can expand only once!
import shlex
args[:1] = shlex.split(alias, True)
command = args[0]
nargs = _Distribution._parse_command_opts(self, parser, args)
# Handle commands that want to consume all remaining arguments
cmd_class = self.get_command_class(command)
if getattr(cmd_class, 'command_consumes_arguments', None):
self.get_option_dict(command)['args'] = ("command line", nargs)
if nargs is not None:
return []
return nargs
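    # Illustrative sketch (comments only, not original module code): the
    # alias expansion above would turn a setup.cfg section such as
    #
    #     [aliases]
    #     release = sdist bdist_wheel
    #
    # into the command sequence 'sdist bdist_wheel' whenever the user
    # runs 'python setup.py release'.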
def get_cmdline_options(self):
"""Return a '{cmd: {opt:val}}' map of all command-line options
Option names are all long, but do not include the leading '--', and
contain dashes rather than underscores. If the option doesn't take
an argument (e.g. '--quiet'), the 'val' is 'None'.
Note that options provided by config files are intentionally excluded.
"""
d = {}
for cmd, opts in self.command_options.items():
for opt, (src, val) in opts.items():
if src != "command line":
continue
opt = opt.replace('_', '-')
if val == 0:
cmdobj = self.get_command_obj(cmd)
neg_opt = self.negative_opt.copy()
neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
for neg, pos in neg_opt.items():
if pos == opt:
opt = neg
val = None
break
else:
raise AssertionError("Shouldn't be able to get here")
elif val == 1:
val = None
d.setdefault(cmd, {})[opt] = val
return d
def iter_distribution_names(self):
"""Yield all packages, modules, and extension names in distribution"""
for pkg in self.packages or ():
yield pkg
for module in self.py_modules or ():
yield module
for ext in self.ext_modules or ():
if isinstance(ext, tuple):
name, buildinfo = ext
else:
name = ext.name
if name.endswith('module'):
name = name[:-6]
yield name
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
import sys
if six.PY2 or self.help_commands:
return _Distribution.handle_display_options(self, option_order)
# Stdout may be StringIO (e.g. in tests)
if not isinstance(sys.stdout, io.TextIOWrapper):
return _Distribution.handle_display_options(self, option_order)
# Don't wrap stdout if utf-8 is already the encoding. Provides
# workaround for #334.
if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
return _Distribution.handle_display_options(self, option_order)
# Print metadata in UTF-8 no matter the platform
encoding = sys.stdout.encoding
errors = sys.stdout.errors
newline = sys.platform != 'win32' and '\n' or None
line_buffering = sys.stdout.line_buffering
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
try:
return _Distribution.handle_display_options(self, option_order)
finally:
sys.stdout = io.TextIOWrapper(
sys.stdout.detach(), encoding, errors, newline, line_buffering)
class DistDeprecationWarning(SetuptoolsDeprecationWarning):
"""Class for warning about deprecations in dist in
setuptools. Not ignored by default, unlike DeprecationWarning."""
# ---- File: setuptools/lib2to3_ex.py ----
"""
Customized Mixin2to3 support:
- adds support for converting doctests
This module raises an ImportError on Python 2.
"""
import warnings
from distutils.util import Mixin2to3 as _Mixin2to3
from distutils import log
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
import setuptools
from ._deprecation_warning import SetuptoolsDeprecationWarning
class DistutilsRefactoringTool(RefactoringTool):
def log_error(self, msg, *args, **kw):
log.error(msg, *args)
def log_message(self, msg, *args):
log.info(msg, *args)
def log_debug(self, msg, *args):
log.debug(msg, *args)
class Mixin2to3(_Mixin2to3):
def run_2to3(self, files, doctests=False):
# See if the distribution option has been set; otherwise check the
# setuptools default.
if self.distribution.use_2to3 is not True:
return
if not files:
return
warnings.warn(
"2to3 support is deprecated. If the project still "
"requires Python 2 support, please migrate to "
"a single-codebase solution or employ an "
"independent conversion process.",
SetuptoolsDeprecationWarning)
log.info("Fixing " + " ".join(files))
self.__build_fixer_names()
self.__exclude_fixers()
if doctests:
if setuptools.run_2to3_on_doctests:
r = DistutilsRefactoringTool(self.fixer_names)
r.refactor(files, write=True, doctests_only=True)
else:
_Mixin2to3.run_2to3(self, files)
def __build_fixer_names(self):
if self.fixer_names:
return
self.fixer_names = []
for p in setuptools.lib2to3_fixer_packages:
self.fixer_names.extend(get_fixers_from_package(p))
if self.distribution.use_2to3_fixers is not None:
for p in self.distribution.use_2to3_fixers:
self.fixer_names.extend(get_fixers_from_package(p))
def __exclude_fixers(self):
excluded_fixers = getattr(self, 'exclude_fixers', [])
if self.distribution.use_2to3_exclude_fixers is not None:
excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
for fixer_name in excluded_fixers:
if fixer_name in self.fixer_names:
self.fixer_names.remove(fixer_name)
# ---- File: setuptools/py33compat.py ----
import dis
import array
import collections
try:
import html
except ImportError:
html = None
from setuptools.extern import six
from setuptools.extern.six.moves import html_parser
__metaclass__ = type
OpArg = collections.namedtuple('OpArg', 'opcode arg')
class Bytecode_compat:
def __init__(self, code):
self.code = code
def __iter__(self):
"""Yield '(op,arg)' pair for each operation in code object 'code'"""
bytes = array.array('b', self.code.co_code)
eof = len(self.code.co_code)
ptr = 0
extended_arg = 0
while ptr < eof:
op = bytes[ptr]
if op >= dis.HAVE_ARGUMENT:
arg = bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg
ptr += 3
if op == dis.EXTENDED_ARG:
long_type = six.integer_types[-1]
extended_arg = arg * long_type(65536)
continue
else:
arg = None
ptr += 1
yield OpArg(op, arg)
Bytecode = getattr(dis, 'Bytecode', Bytecode_compat)
unescape = getattr(html, 'unescape', None)
if unescape is None:
# HTMLParser.unescape is deprecated since Python 3.4, and will be removed
# from 3.9.
unescape = html_parser.HTMLParser().unescape
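# Illustrative sketch (not part of the original module): iterating
# the operations of a trivial code object through whichever Bytecode
# implementation was selected above. On Python 3 the items are
# dis.Instruction objects; with the compat shim they are OpArg pairs.
_example_ops = list(Bytecode((lambda: None).__code__))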
# ---- File: setuptools/wheel.py ----
"""Wheels support."""
from distutils.util import get_platform
from distutils import log
import email
import itertools
import os
import posixpath
import re
import zipfile
import pkg_resources
import setuptools
from pkg_resources import parse_version
from setuptools.extern.packaging.tags import sys_tags
from setuptools.extern.packaging.utils import canonicalize_name
from setuptools.extern.six import PY3
from setuptools.command.egg_info import write_requirements
__metaclass__ = type
WHEEL_NAME = re.compile(
r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
)\.whl$""",
re.VERBOSE).match
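# Illustrative sketch (not part of the original module): WHEEL_NAME
# splits a PEP 427 wheel filename into its components; the project
# name and version below are hypothetical.
_example_match = WHEEL_NAME('demo-1.0-py2.py3-none-any.whl')
# _example_match.group('project_name') == 'demo'
# _example_match.group('platform') == 'any'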
NAMESPACE_PACKAGE_INIT = \
"__import__('pkg_resources').declare_namespace(__name__)\n"
def unpack(src_dir, dst_dir):
'''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
for dirpath, dirnames, filenames in os.walk(src_dir):
subdir = os.path.relpath(dirpath, src_dir)
for f in filenames:
src = os.path.join(dirpath, f)
dst = os.path.join(dst_dir, subdir, f)
os.renames(src, dst)
for n, d in reversed(list(enumerate(dirnames))):
src = os.path.join(dirpath, d)
dst = os.path.join(dst_dir, subdir, d)
if not os.path.exists(dst):
# Directory does not exist in destination,
# rename it and prune it from os.walk list.
os.renames(src, dst)
del dirnames[n]
# Cleanup.
for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
assert not filenames
os.rmdir(dirpath)
class Wheel:
def __init__(self, filename):
match = WHEEL_NAME(os.path.basename(filename))
if match is None:
raise ValueError('invalid wheel name: %r' % filename)
self.filename = filename
for k, v in match.groupdict().items():
setattr(self, k, v)
def tags(self):
'''List tags (py_version, abi, platform) supported by this wheel.'''
return itertools.product(
self.py_version.split('.'),
self.abi.split('.'),
self.platform.split('.'),
)
def is_compatible(self):
'''Is the wheel compatible with the current platform?'''
supported_tags = set(
(t.interpreter, t.abi, t.platform) for t in sys_tags())
return next((True for t in self.tags() if t in supported_tags), False)
def egg_name(self):
return pkg_resources.Distribution(
project_name=self.project_name, version=self.version,
platform=(None if self.platform == 'any' else get_platform()),
).egg_name() + '.egg'
def get_dist_info(self, zf):
# find the correct name of the .dist-info dir in the wheel file
for member in zf.namelist():
dirname = posixpath.dirname(member)
if (dirname.endswith('.dist-info') and
canonicalize_name(dirname).startswith(
canonicalize_name(self.project_name))):
return dirname
raise ValueError("unsupported wheel format. .dist-info not found")
def install_as_egg(self, destination_eggdir):
'''Install wheel as an egg directory.'''
with zipfile.ZipFile(self.filename) as zf:
self._install_as_egg(destination_eggdir, zf)
def _install_as_egg(self, destination_eggdir, zf):
dist_basename = '%s-%s' % (self.project_name, self.version)
dist_info = self.get_dist_info(zf)
dist_data = '%s.data' % dist_basename
egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
self._move_data_entries(destination_eggdir, dist_data)
self._fix_namespace_packages(egg_info, destination_eggdir)
@staticmethod
def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
def get_metadata(name):
with zf.open(posixpath.join(dist_info, name)) as fp:
value = fp.read().decode('utf-8') if PY3 else fp.read()
return email.parser.Parser().parsestr(value)
wheel_metadata = get_metadata('WHEEL')
# Check wheel format version is supported.
wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
wheel_v1 = (
parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
)
if not wheel_v1:
raise ValueError(
'unsupported wheel format version: %s' % wheel_version)
# Extract to target directory.
os.mkdir(destination_eggdir)
zf.extractall(destination_eggdir)
# Convert metadata.
dist_info = os.path.join(destination_eggdir, dist_info)
dist = pkg_resources.Distribution.from_location(
destination_eggdir, dist_info,
metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
)
# Note: Evaluate and strip markers now,
# as it's difficult to convert back from the syntax:
# foobar; "linux" in sys_platform and extra == 'test'
def raw_req(req):
req.marker = None
return str(req)
install_requires = list(sorted(map(raw_req, dist.requires())))
extras_require = {
extra: sorted(
req
for req in map(raw_req, dist.requires((extra,)))
if req not in install_requires
)
for extra in dist.extras
}
os.rename(dist_info, egg_info)
os.rename(
os.path.join(egg_info, 'METADATA'),
os.path.join(egg_info, 'PKG-INFO'),
)
setup_dist = setuptools.Distribution(
attrs=dict(
install_requires=install_requires,
extras_require=extras_require,
),
)
# Temporarily disable info traces.
log_threshold = log._global_log.threshold
log.set_threshold(log.WARN)
try:
write_requirements(
setup_dist.get_command_obj('egg_info'),
None,
os.path.join(egg_info, 'requires.txt'),
)
finally:
log.set_threshold(log_threshold)
@staticmethod
def _move_data_entries(destination_eggdir, dist_data):
"""Move data entries to their correct location."""
dist_data = os.path.join(destination_eggdir, dist_data)
dist_data_scripts = os.path.join(dist_data, 'scripts')
if os.path.exists(dist_data_scripts):
egg_info_scripts = os.path.join(
destination_eggdir, 'EGG-INFO', 'scripts')
os.mkdir(egg_info_scripts)
for entry in os.listdir(dist_data_scripts):
# Remove bytecode, as it's not properly handled
# during easy_install scripts install phase.
if entry.endswith('.pyc'):
os.unlink(os.path.join(dist_data_scripts, entry))
else:
os.rename(
os.path.join(dist_data_scripts, entry),
os.path.join(egg_info_scripts, entry),
)
os.rmdir(dist_data_scripts)
for subdir in filter(os.path.exists, (
os.path.join(dist_data, d)
for d in ('data', 'headers', 'purelib', 'platlib')
)):
unpack(subdir, destination_eggdir)
if os.path.exists(dist_data):
os.rmdir(dist_data)
@staticmethod
def _fix_namespace_packages(egg_info, destination_eggdir):
namespace_packages = os.path.join(
egg_info, 'namespace_packages.txt')
if os.path.exists(namespace_packages):
with open(namespace_packages) as fp:
namespace_packages = fp.read().split()
for mod in namespace_packages:
mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
mod_init = os.path.join(mod_dir, '__init__.py')
if not os.path.exists(mod_dir):
os.mkdir(mod_dir)
if not os.path.exists(mod_init):
with open(mod_init, 'w') as fp:
fp.write(NAMESPACE_PACKAGE_INIT)
# ---- File: setuptools/namespaces.py ----
import os
from distutils import log
import itertools
from setuptools.extern.six.moves import map
flatten = itertools.chain.from_iterable
class Installer:
nspkg_ext = '-nspkg.pth'
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
def uninstall_namespaces(self):
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
if not os.path.exists(filename):
return
log.info("Removing %s", filename)
os.remove(filename)
def _get_target(self):
return self.target
_nspkg_tmpl = (
"import sys, types, os",
"has_mfs = sys.version_info > (3, 5)",
"p = os.path.join(%(root)s, *%(pth)r)",
"importlib = has_mfs and __import__('importlib.util')",
"has_mfs and __import__('importlib.machinery')",
(
"m = has_mfs and "
"sys.modules.setdefault(%(pkg)r, "
"importlib.util.module_from_spec("
"importlib.machinery.PathFinder.find_spec(%(pkg)r, "
"[os.path.dirname(p)])))"
),
(
"m = m or "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))"
),
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
def _get_root(self):
return "sys._getframe(1).f_locals['sitedir']"
def _gen_nspkg_line(self, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
root = self._get_root()
tmpl_lines = self._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += self._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
pkgs = self.distribution.namespace_packages or []
return sorted(flatten(map(self._pkg_names, pkgs)))
@staticmethod
def _pkg_names(pkg):
"""
Given a namespace package, yield the components of that
package.
>>> names = Installer._pkg_names('a.b.c')
>>> set(names) == set(['a', 'a.b', 'a.b.c'])
True
"""
parts = pkg.split('.')
while parts:
yield '.'.join(parts)
parts.pop()
class DevelopInstaller(Installer):
def _get_root(self):
return repr(str(self.egg_path))
def _get_target(self):
return self.egg_link
# ---- File: setuptools/script (dev).tmpl ----
# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r
__requires__ = %(spec)r
__import__('pkg_resources').require(%(spec)r)
__file__ = %(dev_path)r
with open(__file__) as f:
exec(compile(f.read(), __file__, 'exec'))
# ---- File: setuptools/_vendor/six.py ----
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python 3.
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required if is_package is implemented."""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
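# Illustrative sketch (not part of the original module): version-neutral
# byte and text literals via the helpers above.
_example_bytes = b('abc')  # 'bytes' on Python 3, 'str' on Python 2
_example_text = u('abc')   # 'str' on Python 3, 'unicode' on Python 2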
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
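# Illustrative usage (editorial sketch, not part of six): the temporary
# metaclass is discarded as soon as the real class body is executed, so
# the resulting class is built directly by the intended metaclass.
#
#   class Meta(type):
#       pass
#
#   class MyClass(with_metaclass(Meta, object)):
#       pass
#
#   assert type(MyClass) is Meta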
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
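# Illustrative usage (editorial sketch, not part of six): the decorator form
# achieves the same result without any dummy base class.
#
#   class Meta(type):
#       pass
#
#   @add_metaclass(Meta)
#   class Decorated(object):
#       pass
#
#   assert type(Decorated) is Meta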
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
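# Illustrative usage (editorial sketch, not part of six): define __str__
# returning text once, and the decorator wires up __unicode__ and a
# UTF-8-encoding __str__ on Python 2.
#
#   @python_2_unicode_compatible
#   class Greeting(object):
#       def __str__(self):
#           return u"h\u00e9llo"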
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/ordered_set.py
"""
An OrderedSet is a custom MutableSet that remembers its order, so that every
entry has an index that can be looked up.
Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
and released under the MIT license.
"""
import itertools as it
from collections import deque
try:
# Python 3
from collections.abc import MutableSet, Sequence
except ImportError:
# Python 2.7
from collections import MutableSet, Sequence
SLICE_ALL = slice(None)
__version__ = "3.1"
def is_iterable(obj):
"""
Are we being asked to look up a list of things, instead of a single thing?
We check for the `__iter__` attribute so that this can cover types that
don't have to be known by this module, such as NumPy arrays.
Strings, however, should be considered as atomic values to look up, not
iterables. The same goes for tuples, since they are immutable and therefore
valid entries.
We don't need to check for the Python 2 `unicode` type, because it doesn't
have an `__iter__` attribute anyway.
"""
return (
hasattr(obj, "__iter__")
and not isinstance(obj, str)
and not isinstance(obj, tuple)
)
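# Editorial note with assumed examples, consistent with the docstring above:
#   is_iterable([1, 2])  -> True   (treated as a request for several items)
#   is_iterable("ab")    -> False  (strings are atomic lookup keys)
#   is_iterable((1, 2))  -> False  (tuples are hashable entries)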
class OrderedSet(MutableSet, Sequence):
"""
An OrderedSet is a custom MutableSet that remembers its order, so that
every entry has an index that can be looked up.
Example:
>>> OrderedSet([1, 1, 2, 3, 2])
OrderedSet([1, 2, 3])
"""
def __init__(self, iterable=None):
self.items = []
self.map = {}
if iterable is not None:
self |= iterable
def __len__(self):
"""
Returns the number of unique elements in the ordered set
Example:
>>> len(OrderedSet([]))
0
>>> len(OrderedSet([1, 2]))
2
"""
return len(self.items)
def __getitem__(self, index):
"""
Get the item at a given index.
If `index` is a slice, you will get back that slice of items, as a
new OrderedSet.
If `index` is a list or a similar iterable, you'll get a list of
items corresponding to those indices. This is similar to NumPy's
"fancy indexing". The result is not an OrderedSet because you may ask
for duplicate indices, and the number of elements returned should be
the number of elements asked for.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset[1]
2
"""
if isinstance(index, slice) and index == SLICE_ALL:
return self.copy()
elif is_iterable(index):
return [self.items[i] for i in index]
elif hasattr(index, "__index__") or isinstance(index, slice):
result = self.items[index]
if isinstance(result, list):
return self.__class__(result)
else:
return result
else:
raise TypeError("Don't know how to index an OrderedSet by %r" % index)
def copy(self):
"""
Return a shallow copy of this object.
Example:
>>> this = OrderedSet([1, 2, 3])
>>> other = this.copy()
>>> this == other
True
>>> this is other
False
"""
return self.__class__(self)
def __getstate__(self):
if len(self) == 0:
# The state can't be an empty list.
# We need to return a truthy value, or else __setstate__ won't be run.
#
# This could have been done more gracefully by always putting the state
# in a tuple, but this way is backwards- and forwards- compatible with
# previous versions of OrderedSet.
return (None,)
else:
return list(self)
def __setstate__(self, state):
if state == (None,):
self.__init__([])
else:
self.__init__(state)
def __contains__(self, key):
"""
Test if the item is in this ordered set
Example:
>>> 1 in OrderedSet([1, 3, 2])
True
>>> 5 in OrderedSet([1, 3, 2])
False
"""
return key in self.map
def add(self, key):
"""
Add `key` as an item to this OrderedSet, then return its index.
If `key` is already in the OrderedSet, return the index it already
had.
Example:
>>> oset = OrderedSet()
>>> oset.append(3)
0
>>> print(oset)
OrderedSet([3])
"""
if key not in self.map:
self.map[key] = len(self.items)
self.items.append(key)
return self.map[key]
append = add
def update(self, sequence):
"""
Update the set with the given iterable sequence, then return the index
of the last element inserted.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.update([3, 1, 5, 1, 4])
4
>>> print(oset)
OrderedSet([1, 2, 3, 5, 4])
"""
item_index = None
try:
for item in sequence:
item_index = self.add(item)
except TypeError:
raise ValueError(
"Argument needs to be an iterable, got %s" % type(sequence)
)
return item_index
def index(self, key):
"""
        Get the index of a given entry, raising a KeyError if it's not
        present.
`key` can be an iterable of entries that is not a string, in which case
this returns a list of indices.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.index(2)
1
"""
if is_iterable(key):
return [self.index(subkey) for subkey in key]
return self.map[key]
# Provide some compatibility with pd.Index
get_loc = index
get_indexer = index
def pop(self):
"""
Remove and return the last element from the set.
Raises KeyError if the set is empty.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.pop()
3
"""
if not self.items:
raise KeyError("Set is empty")
elem = self.items[-1]
del self.items[-1]
del self.map[elem]
return elem
def discard(self, key):
"""
Remove an element. Do not raise an exception if absent.
The MutableSet mixin uses this to implement the .remove() method, which
*does* raise an error when asked to remove a non-existent item.
Example:
>>> oset = OrderedSet([1, 2, 3])
>>> oset.discard(2)
>>> print(oset)
OrderedSet([1, 3])
>>> oset.discard(2)
>>> print(oset)
OrderedSet([1, 3])
"""
if key in self:
i = self.map[key]
del self.items[i]
del self.map[key]
for k, v in self.map.items():
if v >= i:
self.map[k] = v - 1
def clear(self):
"""
Remove all items from this OrderedSet.
"""
del self.items[:]
self.map.clear()
def __iter__(self):
"""
Example:
>>> list(iter(OrderedSet([1, 2, 3])))
[1, 2, 3]
"""
return iter(self.items)
def __reversed__(self):
"""
Example:
>>> list(reversed(OrderedSet([1, 2, 3])))
[3, 2, 1]
"""
return reversed(self.items)
def __repr__(self):
if not self:
return "%s()" % (self.__class__.__name__,)
return "%s(%r)" % (self.__class__.__name__, list(self))
def __eq__(self, other):
"""
Returns true if the containers have the same items. If `other` is a
Sequence, then order is checked, otherwise it is ignored.
Example:
>>> oset = OrderedSet([1, 3, 2])
>>> oset == [1, 3, 2]
True
>>> oset == [1, 2, 3]
False
>>> oset == [2, 3]
False
>>> oset == OrderedSet([3, 2, 1])
False
"""
# In Python 2 deque is not a Sequence, so treat it as one for
# consistent behavior with Python 3.
if isinstance(other, (Sequence, deque)):
# Check that this OrderedSet contains the same elements, in the
# same order, as the other object.
return list(self) == list(other)
try:
other_as_set = set(other)
except TypeError:
# If `other` can't be converted into a set, it's not equal.
return False
else:
return set(self) == other_as_set
def union(self, *sets):
"""
Combines all unique items.
        Each item's order is defined by its first appearance.
Example:
>>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
>>> print(oset)
OrderedSet([3, 1, 4, 5, 2, 0])
>>> oset.union([8, 9])
OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
>>> oset | {10}
OrderedSet([3, 1, 4, 5, 2, 0, 10])
"""
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
containers = map(list, it.chain([self], sets))
items = it.chain.from_iterable(containers)
return cls(items)
def __and__(self, other):
# the parent implementation of this is backwards
return self.intersection(other)
def intersection(self, *sets):
"""
Returns elements in common between all sets. Order is defined only
by the first set.
Example:
>>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3])
>>> print(oset)
OrderedSet([1, 2, 3])
>>> oset.intersection([2, 4, 5], [1, 2, 3, 4])
OrderedSet([2])
>>> oset.intersection()
OrderedSet([1, 2, 3])
"""
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
if sets:
common = set.intersection(*map(set, sets))
items = (item for item in self if item in common)
else:
items = self
return cls(items)
def difference(self, *sets):
"""
Returns all elements that are in this set but not the others.
Example:
>>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]))
OrderedSet([1, 3])
>>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3]))
OrderedSet([1])
>>> OrderedSet([1, 2, 3]) - OrderedSet([2])
OrderedSet([1, 3])
>>> OrderedSet([1, 2, 3]).difference()
OrderedSet([1, 2, 3])
"""
cls = self.__class__
if sets:
other = set.union(*map(set, sets))
items = (item for item in self if item not in other)
else:
items = self
return cls(items)
def issubset(self, other):
"""
Report whether another set contains this set.
Example:
>>> OrderedSet([1, 2, 3]).issubset({1, 2})
False
>>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4})
True
>>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5})
False
"""
if len(self) > len(other): # Fast check for obvious cases
return False
return all(item in other for item in self)
def issuperset(self, other):
"""
Report whether this set contains another set.
Example:
>>> OrderedSet([1, 2]).issuperset([1, 2, 3])
False
>>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
True
>>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3})
False
"""
if len(self) < len(other): # Fast check for obvious cases
return False
return all(item in self for item in other)
def symmetric_difference(self, other):
"""
Return the symmetric difference of two OrderedSets as a new set.
That is, the new set will contain all elements that are in exactly
one of the sets.
Their order will be preserved, with elements from `self` preceding
elements from `other`.
Example:
>>> this = OrderedSet([1, 4, 3, 5, 7])
>>> other = OrderedSet([9, 7, 1, 3, 2])
>>> this.symmetric_difference(other)
OrderedSet([4, 5, 9, 2])
"""
cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
diff1 = cls(self).difference(other)
diff2 = cls(other).difference(self)
return diff1.union(diff2)
def _update_items(self, items):
"""
Replace the 'items' list of this OrderedSet with a new one, updating
self.map accordingly.
"""
self.items = items
self.map = {item: idx for (idx, item) in enumerate(items)}
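    # Editorial note: rebuilding the index map in a single comprehension keeps
    # every membership test and index() lookup O(1) after a bulk mutation, at
    # the cost of one O(n) pass; the *_update methods below all rely on this.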
def difference_update(self, *sets):
"""
Update this OrderedSet to remove items from one or more other sets.
Example:
>>> this = OrderedSet([1, 2, 3])
>>> this.difference_update(OrderedSet([2, 4]))
>>> print(this)
OrderedSet([1, 3])
>>> this = OrderedSet([1, 2, 3, 4, 5])
>>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6]))
>>> print(this)
OrderedSet([3, 5])
"""
items_to_remove = set()
for other in sets:
items_to_remove |= set(other)
self._update_items([item for item in self.items if item not in items_to_remove])
def intersection_update(self, other):
"""
Update this OrderedSet to keep only items in another set, preserving
their order in this set.
Example:
>>> this = OrderedSet([1, 4, 3, 5, 7])
>>> other = OrderedSet([9, 7, 1, 3, 2])
>>> this.intersection_update(other)
>>> print(this)
OrderedSet([1, 3, 7])
"""
other = set(other)
self._update_items([item for item in self.items if item in other])
def symmetric_difference_update(self, other):
"""
Update this OrderedSet to remove items from another set, then
add items from the other set that were not present in this set.
Example:
>>> this = OrderedSet([1, 4, 3, 5, 7])
>>> other = OrderedSet([9, 7, 1, 3, 2])
>>> this.symmetric_difference_update(other)
>>> print(this)
OrderedSet([4, 5, 9, 2])
"""
items_to_add = [item for item in other if item not in self]
items_to_remove = set(other)
self._update_items(
[item for item in self.items if item not in items_to_remove] + items_to_add
)
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/pyparsing.py
# module pyparsing.py
#
# Copyright (c) 2003-2018 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
=============================================================================
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
Getting Started -
-----------------
Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
classes inherit from. Use the docstrings for examples of how to:
- construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
- construct character word-group expressions using the L{Word} class
- see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
- use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
- associate names with your parsed results using L{ParserElement.setResultsName}
- find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
- find more useful common expressions in the L{pyparsing_common} namespace class
"""
__version__ = "2.2.1"
__versionTime__ = "18 Sep 2018 00:49 UTC"
__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
# Python 3
from collections.abc import Iterable
from collections.abc import MutableMapping
except ImportError:
# Python 2.7
from collections import Iterable
from collections import MutableMapping
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex(r'&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
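# Editorial example (assumed input/output, following the mapping above):
#   _xml_escape('a < b & "c"')  ->  'a &lt; b &amp; &quot;c&quot;'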
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
    Exception thrown when a parse expression doesn't match the input;
    supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
    def __getattr__( self, name ):
        try:
            return self[name]
        except KeyError:
            return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
r"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
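# Editorial worked example (assumed values, consistent with the docstrings):
#   s = "abc\ndef"
#   lineno(5, s) -> 2      # loc 5 is 'e', on the second line
#   col(5, s)    -> 2      # second column of that line
#   line(5, s)   -> "def"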
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
return [frame_summary[:2]]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [frame_summary[:2]]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
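# Editorial sketch (not part of pyparsing): _trim_arity is what lets parse
# actions declare any of the supported signatures; each is wrapped so the
# engine can always invoke it as fn(s, loc, toks).
#
#   integer = Word(nums)
#   integer.setParseAction(lambda t: int(t[0]))       # fn(toks)
#   integer.setParseAction(lambda l, t: t)            # fn(loc, toks)
#   integer.setParseAction(lambda s, l, t: t)         # fn(s, loc, toks)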
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""
Define one or more actions to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
    # bind fn as a default argument so each condition closes over its own
    # function rather than the loop variable (late-binding closure pitfall)
    def pa(s,l,t, fn=fn):
        if not bool(_trim_arity(fn)(s,l,t)):
            raise exc_type(s,l,msg)
    self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately.
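Example (an illustrative sketch; C{report_failure} is an arbitrary callback name)::
    def report_failure(s, loc, expr, err):
        print("failed to match %s at loc %d" % (expr, loc))
    wd = Word(alphas).setFailAction(report_failure)
"""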
self.failAction = fn
return self
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or preloc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(cache) > size:
try:
cache.popitem(False)
except KeyError:
pass
def clear(self):
cache.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(key_fifo) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
def cache_len(self):
return len(cache)
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
self.__len__ = types.MethodType(cache_len, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
instead of re-executing parsing/validating code. Memoizing is done of
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
- explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
loc = nextLoc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
# the sum() builtin can be used to merge results into a single ParseResults object
print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
prints::
[['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
and the optional C{includeSeparators} argument (default=C{False}), indicating whether
the separating matched text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
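Example (an illustrative sketch)::
    # once 'port' matches, the remainder must follow; a failure after the
    # error stop raises ParseFatalException instead of allowing backtracking
    port = Keyword("port") - Word(nums)
    port.parseString("port 8080")  # -> ['port', '8080']
    port.parseString("port abc")   # -> ParseFatalException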
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return self + And._ErrorStop() + other
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
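Example (an illustrative sketch)::
    ip_address = Word(nums) + ('.' + Word(nums)) * 3
    ip_address.parseString("127.0.0.1")  # -> ['127', '.', '0', '.', '0', '.', '1']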
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns C{L{MatchFirst}}
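Example (an illustrative sketch)::
    number = Word(nums)
    name = Word(alphas)
    value = number | name     # number is tried first, then name
    value.parseString("123")  # -> ['123']
    value.parseString("abc")  # -> ['abc']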
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""
Implementation of ~ operator - returns C{L{NotAny}}
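Example (an illustrative sketch)::
    # match any alphabetic identifier except the reserved word 'end'
    identifier = ~Keyword("end") + Word(alphas)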
"""
return NotAny( self )
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
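Example (an illustrative sketch)::
    wd = Word(alphas)
    patt = wd + ',' + wd
    patt.parseString("Hello, World")  # -> ['Hello', ',', 'World']
    patt = wd + Literal(',').suppress() + wd
    patt.parseString("Hello, World")  # -> ['Hello', 'World']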
"""
return Suppress( self )
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
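Example (an illustrative sketch)::
    Word(alphas).parseString("   abc")                    # -> ['abc']
    Word(alphas).leaveWhitespace().parseString("   abc")  # -> ParseException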
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
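Example (an illustrative sketch)::
    # skip only spaces and tabs, so that newlines become significant
    wd = Word(alphas).setWhitespaceChars(" \\t")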
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
def parseWithTabs( self ):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
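Example (an illustrative sketch)::
    tab_sep = Word(printables) + White("\\t").suppress() + Word(printables)
    tab_sep.parseWithTabs().parseString("key\\tvalue")  # -> ['key', 'value']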
"""
self.keepTabs = True
return self
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
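The three actions are called with the following arguments (matching the calls
made from C{_parseNoCache}); pass C{None} for any action to keep its default:
    - C{startAction(instring, loc, expr)}
    - C{successAction(instring, startloc, endloc, expr, toks)}
    - C{exceptionAction(instring, loc, expr, exc)}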
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
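Example (an illustrative sketch; C{"data.txt"} is a hypothetical file)::
    grammar = OneOrMore(Word(alphas))
    results = grammar.parseFile("data.txt")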
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
inline microtests of sub expressions while building up larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
t = t.replace(r'\n','\n')
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=None, caseless=False ):
super(Keyword,self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__( self, matchString, identChars=None ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch,self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl( self, instring, loc, doActions=True ):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
src,mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = self.match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
- L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
r"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString,self).__init__()
# remove whitespace from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
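Example (an illustrative sketch)::
    # make the newline separating two words significant
    two_lines = Word(alphas) + White("\\n") + Word(alphas)
    two_lines.parseString("abc\\ndef")  # -> ['abc', '\\n', 'def']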
"""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
if not(instring[ loc ] in self.matchWhite):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
"""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__( self ):
super(LineStart,self).__init__()
self.errmsg = "Expected start of line"
def parseImpl( self, instring, loc, doActions=True ):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
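# Editor's illustrative sketch (not part of the library): StringStart/StringEnd
# anchor a match to the full input, much like ^...$ in a regex. The helper name
# below is hypothetical, added only for demonstration.
def _demo_string_anchors():
    whole_word = StringStart() + Word(alphas) + StringEnd()
    whole_word.parseString("hello")  # succeeds -> ['hello']
    try:
        whole_word.parseString("hello world")  # fails: input remains after the word
    except ParseException as pe:
        return str(pe)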
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
(default=C{printables}). To emulate the C{\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
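# Editor's illustrative sketch (not part of the library): WordStart/WordEnd with
# alphanums emulate the regex \b word boundary. The helper name below is
# hypothetical, added only for demonstration.
def _demo_word_boundaries():
    # match 'cat' only as a whole word, not embedded in 'concatenate' or 'bobcat'
    whole_cat = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)
    return [t[0] for t, s, e in whole_cat.scanString("cat concatenate bobcat")]
    # -> ['cat']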
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
elif isinstance( exprs, Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
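# Editor's illustrative sketch (not part of the library): the '-' operator
# inserts an And._ErrorStop, so a failure after that point raises a
# non-backtracking ParseSyntaxException. The helper name below is hypothetical,
# added only for demonstration.
def _demo_and_error_stop():
    # once '(' is seen, the number and closing ')' become mandatory
    group = Literal("(") - Word(nums) - Literal(")")
    try:
        group.parseString("(123")  # missing ')'
    except ParseSyntaxException as pe:
        return str(pe)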
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
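# Editor's illustrative sketch (not part of the library): contrast Or ('^',
# longest match wins) with MatchFirst ('|', first listed match wins). The
# helper name below is hypothetical, added only for demonstration.
def _demo_or_vs_matchfirst():
    integer = Word(nums)
    real = Combine(Word(nums) + '.' + Word(nums))
    longest = (integer ^ real).parseString("3.1416").asList()  # -> ['3.1416']
    first = (integer | real).parseString("3.1416").asList()    # -> ['3']
    return longest, first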
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
    Example::
        # take care not to mistake keywords for identifiers (illustrative sketch)
        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
        ident = ~(AND | OR | NOT) + Word(alphas)
    """
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__( self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = (not not self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
     - expr - expression that may match zero or one time
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
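# Editor's illustrative sketch (not part of the library): Optional's default
# argument supplies a value when the expression is absent. The helper name
# below is hypothetical, added only for demonstration.
def _demo_optional_default():
    # the port defaults to '80' when not given
    host_port = Word(alphas)("host") + Optional(Suppress(":") + Word(nums), default="80")("port")
    print(host_port.parseString("example:8080").asList())  # -> ['example', '8080']
    print(host_port.parseString("example").asList())       # -> ['example', '80']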
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
       included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
            print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
return self.__class__.__name__ + ": ..."
        # stubbed out for now - creates awful memory and perf issues; kept
        # commented out for reference, since the early return above makes it
        # unreachable:
        #   self._revertClass = self.__class__
        #   self.__class__ = _ForwardNoRecurse
        #   try:
        #       if self.expr is not None:
        #           retString = _ustr(self.expr)
        #       else:
        #           retString = "None"
        #   finally:
        #       self.__class__ = self._revertClass
        #   return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
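# Editor's illustrative sketch (not part of the library): a Forward lets a
# grammar refer to itself, here matching arbitrarily nested parentheses. The
# helper name below is hypothetical, added only for demonstration.
def _demo_forward_recursion():
    nested = Forward()
    nested <<= Group(Suppress("(") + ZeroOrMore(Word(nums) | nested) + Suppress(")"))
    return nested.parseString("(1 (2 3) 4)").asList()  # -> [['1', ['2', '3'], '4']]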
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""
Abstract subclass of C{ParseExpression}, for converting parsed results.
"""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
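# Editor's illustrative sketch (not part of the library): with adjacent=False,
# Combine still concatenates the matched tokens even when whitespace separates
# them. The helper name below is hypothetical, added only for demonstration.
def _demo_combine_nonadjacent():
    dotted = Combine(Word(alphas) + ZeroOrMore("." + Word(alphas)), adjacent=False)
    return dotted.parseString("a . b . c").asList()  # -> ['a.b.c']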
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
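# Editor's illustrative sketch (not part of the library): OnlyOnce lets a parse
# action fire a single time; later matches fail until reset() is called. The
# helper name below is hypothetical, added only for demonstration.
def _demo_onlyonce():
    seen = []
    def record(s, l, t):
        seen.append(t[0])
    wd = Word(alphas).setParseAction(OnlyOnce(record))
    result = OneOrMore(wd).parseString("a b c")
    return seen, result.asList()  # -> (['a'], ['a']): repetition stops after one action call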
def traceParseAction(f):
"""
Decorator for debugging parse actions.
    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
    By default, the list elements and delimiters can have intervening whitespace and
    comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
previous literal, will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
expressions, will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
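# Editor's illustrative sketch (not part of the library): the literal form
# re-matches the matched text, while the expression form re-parses and compares
# tokens. The helper name below is hypothetical, added only for demonstration.
def _demo_match_previous():
    first = Word(nums)
    literal_pair = first + ":" + matchPreviousLiteral(first)
    print(literal_pair.parseString("1:10"))  # matches the leading '1:1' -> ['1', ':', '1']
    second = Word(nums)
    expr_pair = second + ":" + matchPreviousExpr(second)
    try:
        expr_pair.parseString("1:10")  # re-parses '10', compares with '1', and fails
    except ParseException:
        print("no match for the expression form")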
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
"""
return TokenConverter(expr).setParseAction(lambda t:t[0])
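# Editor's illustrative sketch (not part of the library): ungroup strips one
# level of grouping from a result. The helper name below is hypothetical, added
# only for demonstration.
def _demo_ungroup():
    grouped = Group(Word(alphas) + Word(nums))
    print(grouped.parseString("ab 12").asList())           # -> [['ab', '12']]
    print(ungroup(grouped).parseString("ab 12").asList())  # -> ['ab', '12']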
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
Be careful if the input text contains C{<TAB>} characters, you may want to call
C{L{ParserElement.parseWithTabs}}
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
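# Editor's illustrative sketch (not part of the library): matchOnlyAtCol
# restricts matches to a fixed column, handy for column-oriented reports. The
# helper name below is hypothetical, added only for demonstration.
def _demo_match_only_at_col():
    # accept numbers only when they start in column 5
    col5_num = Word(nums).setParseAction(matchOnlyAtCol(5))
    return [t[0] for t, s, e in col5_num.scanString("12  34\n56  78")]
    # -> ['34', '78']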
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
args are passed, they are forwarded to the given function as additional arguments after
the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
parsed data to an integer using base 16.
    Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s,l,t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags( tagStr, True )
def withAttribute(*args,**attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attributes names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
    - baseExpr - expression representing the most basic element of the nested expression
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted); if the parse action
is passed a tuple or list of functions, this is equivalent to
calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
if isinstance(pa, (tuple, list)):
matchExpr.setParseAction(*pa)
else:
matchExpr.setParseAction(pa)
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
    - indent - boolean indicating whether block must be indented beyond
        the current level; set to False for block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
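# Illustrative sketch (not part of the library source): pairing
# commonHTMLEntity with replaceHTMLEntity decodes the mapped entities
# during transformString.
def _demo_replace_html_entity():  # hypothetical demo helper
    decoder = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
    assert decoder.transformString("x &lt; y &amp; z") == "x < y & z"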
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ Optional( White(" \t") ) ) ).streamline().setName("commaItem")
comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
# File: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/tags.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import
import distutils.util
try:
from importlib.machinery import EXTENSION_SUFFIXES
except ImportError: # pragma: no cover
import imp
EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
del imp
import platform
import re
import sys
import sysconfig
import warnings
INTERPRETER_SHORT_NAMES = {
"python": "py", # Generic.
"cpython": "cp",
"pypy": "pp",
"ironpython": "ip",
"jython": "jy",
}
_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
class Tag(object):
__slots__ = ["_interpreter", "_abi", "_platform"]
def __init__(self, interpreter, abi, platform):
self._interpreter = interpreter.lower()
self._abi = abi.lower()
self._platform = platform.lower()
@property
def interpreter(self):
return self._interpreter
@property
def abi(self):
return self._abi
@property
def platform(self):
return self._platform
def __eq__(self, other):
return (
(self.platform == other.platform)
and (self.abi == other.abi)
and (self.interpreter == other.interpreter)
)
def __hash__(self):
return hash((self._interpreter, self._abi, self._platform))
def __str__(self):
return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
def __repr__(self):
return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
def parse_tag(tag):
tags = set()
interpreters, abis, platforms = tag.split("-")
for interpreter in interpreters.split("."):
for abi in abis.split("."):
for platform_ in platforms.split("."):
tags.add(Tag(interpreter, abi, platform_))
return frozenset(tags)
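# Illustrative sketch (not part of the vendored source): parse_tag expands a
# compressed PEP 425 tag string into the frozenset of its individual tags.
def _demo_parse_tag():  # hypothetical demo helper
    expanded = parse_tag("cp37-cp37m.abi3-manylinux1_x86_64")
    # Two ABIs crossed with one interpreter and one platform -> two tags.
    assert Tag("cp37", "cp37m", "manylinux1_x86_64") in expanded
    assert Tag("cp37", "abi3", "manylinux1_x86_64") in expanded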
def _normalize_string(string):
return string.replace(".", "_").replace("-", "_")
def _cpython_interpreter(py_version):
# TODO: Is using py_version_nodot for interpreter version critical?
return "cp{major}{minor}".format(major=py_version[0], minor=py_version[1])
def _cpython_abis(py_version):
abis = []
version = "{}{}".format(*py_version[:2])
debug = pymalloc = ucs4 = ""
with_debug = sysconfig.get_config_var("Py_DEBUG")
has_refcount = hasattr(sys, "gettotalrefcount")
# Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
# extension modules is the best option.
# https://github.com/pypa/pip/issues/3383#issuecomment-173267692
has_ext = "_d.pyd" in EXTENSION_SUFFIXES
if with_debug or (with_debug is None and (has_refcount or has_ext)):
debug = "d"
if py_version < (3, 8):
with_pymalloc = sysconfig.get_config_var("WITH_PYMALLOC")
if with_pymalloc or with_pymalloc is None:
pymalloc = "m"
if py_version < (3, 3):
unicode_size = sysconfig.get_config_var("Py_UNICODE_SIZE")
if unicode_size == 4 or (
unicode_size is None and sys.maxunicode == 0x10FFFF
):
ucs4 = "u"
elif debug:
# Debug builds can also load "normal" extension modules.
# We can also assume no UCS-4 or pymalloc requirement.
abis.append("cp{version}".format(version=version))
abis.insert(
0,
"cp{version}{debug}{pymalloc}{ucs4}".format(
version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
),
)
return abis
def _cpython_tags(py_version, interpreter, abis, platforms):
for abi in abis:
for platform_ in platforms:
yield Tag(interpreter, abi, platform_)
for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
yield tag
for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
yield tag
# PEP 384 was first implemented in Python 3.2.
for minor_version in range(py_version[1] - 1, 1, -1):
for platform_ in platforms:
interpreter = "cp{major}{minor}".format(
major=py_version[0], minor=minor_version
)
yield Tag(interpreter, "abi3", platform_)
def _pypy_interpreter():
return "pp{py_major}{pypy_major}{pypy_minor}".format(
py_major=sys.version_info[0],
pypy_major=sys.pypy_version_info.major,
pypy_minor=sys.pypy_version_info.minor,
)
def _generic_abi():
abi = sysconfig.get_config_var("SOABI")
if abi:
return _normalize_string(abi)
else:
return "none"
def _pypy_tags(py_version, interpreter, abi, platforms):
for tag in (Tag(interpreter, abi, platform) for platform in platforms):
yield tag
for tag in (Tag(interpreter, "none", platform) for platform in platforms):
yield tag
def _generic_tags(interpreter, py_version, abi, platforms):
for tag in (Tag(interpreter, abi, platform) for platform in platforms):
yield tag
if abi != "none":
tags = (Tag(interpreter, "none", platform_) for platform_ in platforms)
for tag in tags:
yield tag
def _py_interpreter_range(py_version):
"""
Yield Python versions in descending order.
    After the latest version, the major-only version will be yielded, and then
    all earlier minor versions of that major version, in descending order.
"""
yield "py{major}{minor}".format(major=py_version[0], minor=py_version[1])
yield "py{major}".format(major=py_version[0])
for minor in range(py_version[1] - 1, -1, -1):
yield "py{major}{minor}".format(major=py_version[0], minor=minor)
def _independent_tags(interpreter, py_version, platforms):
"""
Return the sequence of tags that are consistent across implementations.
The tags consist of:
- py*-none-<platform>
- <interpreter>-none-any
- py*-none-any
"""
for version in _py_interpreter_range(py_version):
for platform_ in platforms:
yield Tag(version, "none", platform_)
yield Tag(interpreter, "none", "any")
for version in _py_interpreter_range(py_version):
yield Tag(version, "none", "any")
def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
if not is_32bit:
return arch
if arch.startswith("ppc"):
return "ppc"
return "i386"
def _mac_binary_formats(version, cpu_arch):
formats = [cpu_arch]
if cpu_arch == "x86_64":
if version < (10, 4):
return []
formats.extend(["intel", "fat64", "fat32"])
elif cpu_arch == "i386":
if version < (10, 4):
return []
formats.extend(["intel", "fat32", "fat"])
elif cpu_arch == "ppc64":
# TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
if version > (10, 5) or version < (10, 4):
return []
formats.append("fat64")
elif cpu_arch == "ppc":
if version > (10, 6):
return []
formats.extend(["fat32", "fat"])
formats.append("universal")
return formats
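# Illustrative sketch (not part of the vendored source): on a 64-bit Intel
# build targeting macOS 10.9, the compatible binary formats widen from the
# specific architecture to the fat/universal variants.
def _demo_mac_binary_formats():  # hypothetical demo helper
    assert _mac_binary_formats((10, 9), "x86_64") == [
        "x86_64", "intel", "fat64", "fat32", "universal"
    ]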
def _mac_platforms(version=None, arch=None):
version_str, _, cpu_arch = platform.mac_ver()
if version is None:
version = tuple(map(int, version_str.split(".")[:2]))
if arch is None:
arch = _mac_arch(cpu_arch)
platforms = []
for minor_version in range(version[1], -1, -1):
compat_version = version[0], minor_version
binary_formats = _mac_binary_formats(compat_version, arch)
for binary_format in binary_formats:
platforms.append(
"macosx_{major}_{minor}_{binary_format}".format(
major=compat_version[0],
minor=compat_version[1],
binary_format=binary_format,
)
)
return platforms
# From PEP 513.
def _is_manylinux_compatible(name, glibc_version):
# Check for presence of _manylinux module.
try:
import _manylinux
return bool(getattr(_manylinux, name + "_compatible"))
except (ImportError, AttributeError):
# Fall through to heuristic check below.
pass
return _have_compatible_glibc(*glibc_version)
def _glibc_version_string():
# Returns glibc version string, or None if not using glibc.
import ctypes
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str
# Separated out from have_compatible_glibc for easier unit testing.
def _check_glibc_version(version_str, required_major, minimum_minor):
# Parse string and check against requested version.
#
# We use a regexp instead of str.split because we want to discard any
# random junk that might come after the minor version -- this might happen
# in patched/forked versions of glibc (e.g. Linaro's version of glibc
# uses version strings like "2.20-2014.11"). See gh-3588.
m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
if not m:
warnings.warn(
"Expected glibc version with 2 components major.minor,"
" got: %s" % version_str,
RuntimeWarning,
)
return False
return (
int(m.group("major")) == required_major
and int(m.group("minor")) >= minimum_minor
)
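# Illustrative sketch (not part of the vendored source): the check tolerates
# vendor suffixes after the minor version (e.g. Linaro's "2.20-2014.11").
def _demo_check_glibc_version():  # hypothetical demo helper
    assert _check_glibc_version("2.20-2014.11", 2, 17)
    assert not _check_glibc_version("2.5", 2, 12)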
def _have_compatible_glibc(required_major, minimum_minor):
version_str = _glibc_version_string()
if version_str is None:
return False
return _check_glibc_version(version_str, required_major, minimum_minor)
def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
linux = _normalize_string(distutils.util.get_platform())
if linux == "linux_x86_64" and is_32bit:
linux = "linux_i686"
manylinux_support = (
("manylinux2014", (2, 17)), # CentOS 7 w/ glibc 2.17 (PEP 599)
("manylinux2010", (2, 12)), # CentOS 6 w/ glibc 2.12 (PEP 571)
("manylinux1", (2, 5)), # CentOS 5 w/ glibc 2.5 (PEP 513)
)
manylinux_support_iter = iter(manylinux_support)
for name, glibc_version in manylinux_support_iter:
if _is_manylinux_compatible(name, glibc_version):
platforms = [linux.replace("linux", name)]
break
else:
platforms = []
# Support for a later manylinux implies support for an earlier version.
platforms += [linux.replace("linux", name) for name, _ in manylinux_support_iter]
platforms.append(linux)
return platforms
def _generic_platforms():
platform = _normalize_string(distutils.util.get_platform())
return [platform]
def _interpreter_name():
name = platform.python_implementation().lower()
return INTERPRETER_SHORT_NAMES.get(name) or name
def _generic_interpreter(name, py_version):
version = sysconfig.get_config_var("py_version_nodot")
if not version:
version = "".join(map(str, py_version[:2]))
return "{name}{version}".format(name=name, version=version)
def sys_tags():
"""
Returns the sequence of tag triples for the running interpreter.
The order of the sequence corresponds to priority order for the
interpreter, from most to least important.
"""
py_version = sys.version_info[:2]
interpreter_name = _interpreter_name()
if platform.system() == "Darwin":
platforms = _mac_platforms()
elif platform.system() == "Linux":
platforms = _linux_platforms()
else:
platforms = _generic_platforms()
if interpreter_name == "cp":
interpreter = _cpython_interpreter(py_version)
abis = _cpython_abis(py_version)
for tag in _cpython_tags(py_version, interpreter, abis, platforms):
yield tag
elif interpreter_name == "pp":
interpreter = _pypy_interpreter()
abi = _generic_abi()
for tag in _pypy_tags(py_version, interpreter, abi, platforms):
yield tag
else:
interpreter = _generic_interpreter(interpreter_name, py_version)
abi = _generic_abi()
for tag in _generic_tags(interpreter, py_version, abi, platforms):
yield tag
for tag in _independent_tags(interpreter, py_version, platforms):
yield tag
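# Illustrative sketch (not part of the vendored source): tags are yielded in
# priority order, so the first one is the best wheel tag for this interpreter.
def _demo_sys_tags():  # hypothetical demo helper
    best = next(iter(sys_tags()))
    print("best wheel tag for this interpreter:", best)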
# File: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/version.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
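# Illustrative sketch (not part of the vendored source): parse() falls back
# to LegacyVersion for strings that are not valid PEP 440 versions.
def _demo_parse():  # hypothetical demo helper
    assert isinstance(parse("1.0.4"), Version)
    assert isinstance(parse("french toast"), LegacyVersion)
    assert parse("1.0.4") < parse("1.1.dev1")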
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def epoch(self):
return -1
@property
def release(self):
return None
@property
def pre(self):
return None
@property
def post(self):
return None
@property
def dev(self):
return None
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
@property
def is_devrelease(self):
return False
_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_legacy_version_replacement_map = {
"pre": "c",
"preview": "c",
"-": "final-",
"rc": "c",
"dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
    # greater than or equal to 0. This effectively sorts every LegacyVersion,
    # which uses the de facto standard originally implemented by setuptools,
    # before all PEP 440 versions.
epoch = -1
    # This scheme is taken from pkg_resources.parse_version in setuptools,
    # prior to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
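# Illustrative sketch (not part of the vendored source): the legacy key sorts
# pre-release spellings before the corresponding final release.
def _demo_legacy_cmpkey():  # hypothetical demo helper
    assert _legacy_cmpkey("1.0rc1") < _legacy_cmpkey("1.0")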
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(".post{0}".format(self.post))
# Development release
if self.dev is not None:
parts.append(".dev{0}".format(self.dev))
# Local version segment
if self.local is not None:
parts.append("+{0}".format(self.local))
return "".join(parts)
@property
def epoch(self):
return self._version.epoch
@property
def release(self):
return self._version.release
@property
def pre(self):
return self._version.pre
@property
def post(self):
return self._version.post[1] if self._version.post else None
@property
def dev(self):
return self._version.dev[1] if self._version.dev else None
@property
def local(self):
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self):
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self):
return self.post is not None
@property
def is_devrelease(self):
return self.dev is not None
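# Illustrative sketch (not part of the vendored source): Version exposes the
# parsed PEP 440 components as read-only properties.
def _demo_version_fields():  # hypothetical demo helper
    v = Version("1!2.0.post3.dev4+ubuntu.1")
    assert (v.epoch, v.release) == (1, (2, 0))
    assert v.post == 3 and v.dev == 4 and v.local == "ubuntu.1"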
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
        # We assume that if we are given a number but no letter, then this is
        # using the implicit post-release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
    # When we compare a release version, we want to compare it with all of the
    # trailing zeros removed. So we'll reverse the list, drop all the now-
    # leading zeros until we come to something non-zero, then take the rest,
    # re-reverse it back into the correct order, and use that tuple as our
    # sorting key.
release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local)
return epoch, release, pre, post, dev, local
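# Illustrative sketch (not part of the vendored source): because _cmpkey
# strips trailing zeros from the release segment, padded versions are equal.
def _demo_release_zero_trimming():  # hypothetical demo helper
    assert Version("1.0") == Version("1.0.0")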
# File: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/__init__.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from .__about__ import (
__author__,
__copyright__,
__email__,
__license__,
__summary__,
__title__,
__uri__,
__version__,
)
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
# File: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/utils.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import re
from .version import InvalidVersion, Version
_canonicalize_regex = re.compile(r"[-_.]+")
def canonicalize_name(name):
# This is taken from PEP 503.
return _canonicalize_regex.sub("-", name).lower()
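# Illustrative sketch (not part of the vendored source): PEP 503 name
# normalization collapses runs of '-', '_' and '.' and lower-cases the result.
def _demo_canonicalize_name():  # hypothetical demo helper
    assert canonicalize_name("Foo__Bar.baz") == "foo-bar-baz"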
def canonicalize_version(version):
"""
    This is very similar to Version.__str__, but has one subtle difference
    in the way it handles the release segment.
"""
try:
version = Version(version)
except InvalidVersion:
# Legacy versions cannot be normalized
return version
parts = []
# Epoch
if version.epoch != 0:
parts.append("{0}!".format(version.epoch))
# Release segment
# NB: This strips trailing '.0's to normalize
parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
# Pre-release
if version.pre is not None:
parts.append("".join(str(x) for x in version.pre))
# Post-release
if version.post is not None:
parts.append(".post{0}".format(version.post))
# Development release
if version.dev is not None:
parts.append(".dev{0}".format(version.dev))
# Local version segment
if version.local is not None:
parts.append("+{0}".format(version.local))
return "".join(parts)
# File: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/requirements.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from setuptools.extern.pyparsing import Literal as L # noqa
from setuptools.extern.six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found, users should refer to PEP 508.
"""
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
URI = Regex(r"[^ ]+")("url")
URL = AT + URI
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(
VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
lambda s, l, t: Marker(s[t._original_start : t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
# setuptools.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
# issue #104
REQUIREMENT.parseString("x[]")
class Requirement(object):
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string):
try:
req = REQUIREMENT.parseString(requirement_string)
except ParseException as e:
raise InvalidRequirement(
'Parse error at "{0!r}": {1}'.format(
requirement_string[e.loc : e.loc + 8], e.msg
)
)
self.name = req.name
if req.url:
parsed_url = urlparse.urlparse(req.url)
if parsed_url.scheme == "file":
if urlparse.urlunparse(parsed_url) != req.url:
raise InvalidRequirement("Invalid URL given")
elif not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc
):
raise InvalidRequirement("Invalid URL: {0}".format(req.url))
self.url = req.url
else:
self.url = None
self.extras = set(req.extras.asList() if req.extras else [])
self.specifier = SpecifierSet(req.specifier)
self.marker = req.marker if req.marker else None
def __str__(self):
parts = [self.name]
if self.extras:
parts.append("[{0}]".format(",".join(sorted(self.extras))))
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append("@ {0}".format(self.url))
if self.marker:
parts.append(" ")
if self.marker:
parts.append("; {0}".format(self.marker))
return "".join(parts)
def __repr__(self):
return "<Requirement({0!r})>".format(str(self))
# File: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/_structures.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
class Infinity(object):
def __repr__(self):
return "Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return False
def __le__(self, other):
return False
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return True
def __ge__(self, other):
return True
def __neg__(self):
return NegativeInfinity
Infinity = Infinity()
class NegativeInfinity(object):
def __repr__(self):
return "-Infinity"
def __hash__(self):
return hash(repr(self))
def __lt__(self, other):
return True
def __le__(self, other):
return True
def __eq__(self, other):
return isinstance(other, self.__class__)
def __ne__(self, other):
return not isinstance(other, self.__class__)
def __gt__(self, other):
return False
def __ge__(self, other):
return False
def __neg__(self):
return Infinity
NegativeInfinity = NegativeInfinity()
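# Illustrative sketch (not part of the vendored source): the sentinels sort
# above/below every other value, which _cmpkey in version.py relies on.
def _demo_infinity_ordering():  # hypothetical demo helper
    assert Infinity > 10 ** 9
    assert NegativeInfinity < -10 ** 9
    assert -Infinity is NegativeInfinity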
# File: qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/markers.py
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from setuptools.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker",
"UndefinedComparison",
"UndefinedEnvironmentName",
"Marker",
"default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
raise NotImplementedError
class Variable(Node):
def serialize(self):
return str(self)
class Value(Node):
def serialize(self):
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
return str(self)
VARIABLE = (
L("implementation_version")
| L("platform_python_implementation")
| L("implementation_name")
| L("python_full_version")
| L("platform_release")
| L("platform_version")
| L("platform_machine")
| L("platform_system")
| L("python_version")
| L("sys_platform")
| L("os_name")
| L("os.name")
| L("sys.platform") # PEP-345
| L("platform.version") # PEP-345
| L("platform.machine") # PEP-345
| L("platform.python_implementation") # PEP-345
| L("python_implementation") # PEP-345
| L("extra") # undocumented setuptools legacy
)
ALIASES = {
"os.name": "os_name",
"sys.platform": "sys_platform",
"platform.version": "platform_version",
"platform.machine": "platform_machine",
"platform.python_implementation": "platform_python_implementation",
"python_implementation": "platform_python_implementation",
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
    # Sometimes we have a structure like [[...]] which is a single item list
    # where the single item is itself its own list. In that case we want to
    # skip the rest of this function so that we don't get extraneous () on
    # the outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs, op, rhs):
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
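# Illustrative note (not part of the upstream module): because _eval_op first
# tries to interpret the operator plus right-hand side as a PEP 440 specifier,
# a marker such as python_full_version >= "3.6.1" gets proper version
# ordering, e.g. Specifier(">=3.6.1").contains("3.10.0") is True even though
# the plain string comparison "3.10.0" >= "3.6.1" is False.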
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
groups = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info):
version = "{0.major}.{0.minor}.{0.micro}".format(info)
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
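# Illustrative example (the namedtuple below is hypothetical, standing in for
# sys.implementation.version):
#
#     from collections import namedtuple
#     _VI = namedtuple("_VI", "major minor micro releaselevel serial")
#     format_full_version(_VI(3, 8, 0, "candidate", 1))  # -> "3.8.0c1"
#     format_full_version(_VI(3, 8, 10, "final", 0))     # -> "3.8.10"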
def default_environment():
if hasattr(sys, "implementation"):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
iver = "0"
implementation_name = ""
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
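# Illustrative note (values are examples only): on a typical CPython 3.8
# Linux build this returns something like
# {"implementation_name": "cpython", "python_version": "3.8",
#  "sys_platform": "linux", "os_name": "posix", ...}.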
class Marker(object):
def __init__(self, marker):
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc : e.loc + 8]
)
raise InvalidMarker(err_str)
def __str__(self):
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
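# A minimal usage sketch (illustrative; assumes the vendored import path):
#
#     from setuptools._vendor.packaging.markers import Marker
#     m = Marker('python_version >= "2.7" and os_name == "posix"')
#     m.evaluate()                    # evaluated against this interpreter
#     m.evaluate({"os_name": "nt"})   # override part of the environment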
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/__about__.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"
__version__ = "19.2"
__author__ = "Donald Stufft and individual contributors"
__email__ = "donald@stufft.io"
__license__ = "BSD or Apache License, Version 2.0"
__copyright__ = "Copyright 2014-2019 %s" % __author__
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/_compat.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# flake8: noqa
if PY3:
string_types = (str,)
else:
string_types = (basestring,)
def with_metaclass(meta, *bases):
"""
Create a base class with a metaclass.
"""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {})
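# Illustrative only: with_metaclass lets one class body use a metaclass on
# both Python 2 and 3, e.g.
#
#     import abc
#     class Base(with_metaclass(abc.ABCMeta, object)):
#         @abc.abstractmethod
#         def run(self):
#             """Subclasses must implement run()."""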
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/_vendor/packaging/specifiers.py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
    An invalid specifier was found; users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
        Returns the str representation of this Specifier-like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
        Returns a hash value for this Specifier-like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
        Returns a boolean representing whether or not the two Specifier-like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
        Returns a boolean representing whether or not the two Specifier-like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (match.group("operator").strip(), match.group("version").strip())
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def operator(self):
return self._spec[0]
@property
def version(self):
return self._spec[1]
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
        # Normalize item to a Version or LegacyVersion; this allows us to have
        # a shortcut for ``"2.0" in Specifier(">=2")``.
item = self._coerce_version(item)
        # Determine whether we should be supporting prereleases in this
        # specifier or not; if we do not support prereleases, we can
        # short-circuit the logic if this version is a prerelease.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self.operator)(item, self.version)
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
                # prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if parsed_version.is_prerelease and not (
prereleases or self.prereleases
):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex_str = r"""
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^,;\s)]* # Since this is a "legacy" specifier, and the version
# string can be just about anything, we match everything
# except for whitespace, a semi-colon for marker support,
# a closing paren since versions can be enclosed in
# them, and a comma since it's a version separator.
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
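# Illustrative note: the decorator above makes each guarded comparison return
# False when the prospective version is not a PEP 440 Version, e.g.
# Specifier(">=1.0").contains("not-a-version") is False because that string
# parses as a LegacyVersion.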
class Specifier(_IndividualSpecifier):
_regex_str = r"""
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
            # All other operators only allow a subset of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
"""
_regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
        # Compatible releases have an equivalent combination of >= and ==.
        # That is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
        # implement this in terms of the other specifiers instead of
        # implementing it ourselves. The only thing we need to do is construct
        # the other specifiers.
        # We want everything but the last item in the version, but we want to
        # ignore post and dev releases, and we want to treat the pre-release
        # as its own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post") and not x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
prospective, prefix
)
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# In the case of prefix matching we want to ignore local segment.
prospective = Version(prospective.public)
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[: len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
        # This special case is here so that, unless the specifier itself
        # includes a pre-release version, we do not accept pre-release
        # versions for the version mentioned in the specifier (e.g. <3.1
        # should not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
        # If we've gotten to here, it means that the prospective version is
        # both less than the spec version *and* not a pre-release of the same
        # version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
        # This special case is here so that, unless the specifier itself
        # includes a post-release version, we do not accept post-release
        # versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
        # If we've gotten to here, it means that the prospective version is
        # both greater than the spec version *and* not a post-release or
        # local version of the same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~=", "==="]:
            # The == specifier can include a trailing .*; if it does, we
            # want to remove it before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
            # Parse the version, and if it is a pre-release then this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
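# For example (illustrative): _version_split("1.0rc1") returns
# ["1", "0", "rc1"], treating the pre-release as its own segment.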
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]) :])
right_split.append(right[len(right_split[0]) :])
# Insert our padding
left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
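# For example (illustrative): _pad_version(["1", "2"], ["1", "2", "3"])
# returns (["1", "2", "0"], ["1", "2", "3"]), so "1.2" compares as "1.2.0".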
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
        # Split on , to break each individual specifier into its own item, and
        # strip each item to remove leading/trailing whitespace.
        specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
        # Parse each individual specifier, attempting first to make it a
        # Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
def __len__(self):
return len(self._specs)
def __iter__(self):
return iter(self._specs)
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def __contains__(self, item):
return self.contains(item)
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
        # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if not prereleases and item.is_prerelease:
return False
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(s.contains(item, prereleases=prereleases) for s in self._specs)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=bool(prereleases))
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
                # Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
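# A minimal usage sketch (illustrative; assumes the vendored import path):
#
#     from setuptools._vendor.packaging.specifiers import SpecifierSet
#     spec = SpecifierSet(">=1.0,!=1.3.4,<2.0")
#     "1.5" in spec                                # True
#     list(spec.filter(["1.0", "1.3.4", "2.1"]))   # ["1.0"]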
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/bdist_egg.py
|
"""setuptools.command.bdist_egg
Build .egg distributions"""
from distutils.errors import DistutilsSetupError
from distutils.dir_util import remove_tree, mkpath
from distutils import log
from types import CodeType
import sys
import os
import re
import textwrap
import marshal
import warnings
from setuptools.extern import six
from pkg_resources import get_build_platform, Distribution, ensure_directory
from pkg_resources import EntryPoint
from setuptools.extension import Library
from setuptools import Command, SetuptoolsDeprecationWarning
try:
# Python 2.7 or >=3.2
from sysconfig import get_path, get_python_version
def _get_purelib():
return get_path("purelib")
except ImportError:
from distutils.sysconfig import get_python_lib, get_python_version
def _get_purelib():
return get_python_lib(False)
def strip_module(filename):
if '.' in filename:
filename = os.path.splitext(filename)[0]
if filename.endswith('module'):
filename = filename[:-6]
return filename
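# For example (illustrative): strip_module("foomodule.so") returns "foo",
# and strip_module("bar.pyd") returns "bar".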
def sorted_walk(dir):
"""Do os.walk in a reproducible way,
    independent of nondeterministic filesystem readdir order
"""
for base, dirs, files in os.walk(dir):
dirs.sort()
files.sort()
yield base, dirs, files
def write_stub(resource, pyfile):
_stub_template = textwrap.dedent("""
def __bootstrap__():
global __bootstrap__, __loader__, __file__
import sys, pkg_resources
from importlib.machinery import ExtensionFileLoader
__file__ = pkg_resources.resource_filename(__name__, %r)
__loader__ = None; del __bootstrap__, __loader__
ExtensionFileLoader(__name__,__file__).load_module()
__bootstrap__()
""").lstrip()
with open(pyfile, 'w') as f:
f.write(_stub_template % resource)
class bdist_egg(Command):
description = "create an \"egg\" distribution"
user_options = [
('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p', "platform name to embed in generated filenames "
"(default: %s)" % get_build_platform()),
('exclude-source-files', None,
"remove all .py files from the generated egg"),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
]
boolean_options = [
'keep-temp', 'skip-build', 'exclude-source-files'
]
def initialize_options(self):
self.bdist_dir = None
self.plat_name = None
self.keep_temp = 0
self.dist_dir = None
self.skip_build = 0
self.egg_output = None
self.exclude_source_files = None
def finalize_options(self):
ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
self.egg_info = ei_cmd.egg_info
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'egg')
if self.plat_name is None:
self.plat_name = get_build_platform()
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
if self.egg_output is None:
# Compute filename of the output egg
basename = Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version,
get_python_version(),
self.distribution.has_ext_modules() and self.plat_name
).egg_name()
self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
def do_install_data(self):
# Hack for packages that install data to install's --install-lib
self.get_finalized_command('install').install_lib = self.bdist_dir
site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
old, self.distribution.data_files = self.distribution.data_files, []
for item in old:
if isinstance(item, tuple) and len(item) == 2:
if os.path.isabs(item[0]):
realpath = os.path.realpath(item[0])
normalized = os.path.normcase(realpath)
if normalized == site_packages or normalized.startswith(
site_packages + os.sep
):
item = realpath[len(site_packages) + 1:], item[1]
# XXX else: raise ???
self.distribution.data_files.append(item)
try:
log.info("installing package data to %s", self.bdist_dir)
self.call_command('install_data', force=0, root=None)
finally:
self.distribution.data_files = old
def get_outputs(self):
return [self.egg_output]
def call_command(self, cmdname, **kw):
"""Invoke reinitialized command `cmdname` with keyword args"""
for dirname in INSTALL_DIRECTORY_ATTRS:
kw.setdefault(dirname, self.bdist_dir)
kw.setdefault('skip_build', self.skip_build)
kw.setdefault('dry_run', self.dry_run)
cmd = self.reinitialize_command(cmdname, **kw)
self.run_command(cmdname)
return cmd
def run(self):
# Generate metadata first
self.run_command("egg_info")
# We run install_lib before install_data, because some data hacks
# pull their data path from the install_lib command.
log.info("installing library code to %s", self.bdist_dir)
instcmd = self.get_finalized_command('install')
old_root = instcmd.root
instcmd.root = None
if self.distribution.has_c_libraries() and not self.skip_build:
self.run_command('build_clib')
cmd = self.call_command('install_lib', warn_dir=0)
instcmd.root = old_root
all_outputs, ext_outputs = self.get_ext_outputs()
self.stubs = []
to_compile = []
for (p, ext_name) in enumerate(ext_outputs):
filename, ext = os.path.splitext(ext_name)
pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
'.py')
self.stubs.append(pyfile)
log.info("creating stub loader for %s", ext_name)
if not self.dry_run:
write_stub(os.path.basename(ext_name), pyfile)
to_compile.append(pyfile)
ext_outputs[p] = ext_name.replace(os.sep, '/')
if to_compile:
cmd.byte_compile(to_compile)
if self.distribution.data_files:
self.do_install_data()
# Make the EGG-INFO directory
archive_root = self.bdist_dir
egg_info = os.path.join(archive_root, 'EGG-INFO')
self.mkpath(egg_info)
if self.distribution.scripts:
script_dir = os.path.join(egg_info, 'scripts')
log.info("installing scripts to %s", script_dir)
self.call_command('install_scripts', install_dir=script_dir,
no_ep=1)
self.copy_metadata_to(egg_info)
native_libs = os.path.join(egg_info, "native_libs.txt")
if all_outputs:
log.info("writing %s", native_libs)
if not self.dry_run:
ensure_directory(native_libs)
libs_file = open(native_libs, 'wt')
libs_file.write('\n'.join(all_outputs))
libs_file.write('\n')
libs_file.close()
elif os.path.isfile(native_libs):
log.info("removing %s", native_libs)
if not self.dry_run:
os.unlink(native_libs)
write_safety_flag(
os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
)
if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
log.warn(
"WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
if self.exclude_source_files:
self.zap_pyfiles()
# Make the archive
make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
dry_run=self.dry_run, mode=self.gen_header())
if not self.keep_temp:
remove_tree(self.bdist_dir, dry_run=self.dry_run)
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_egg', get_python_version(), self.egg_output))
def zap_pyfiles(self):
log.info("Removing .py files from temporary directory")
for base, dirs, files in walk_egg(self.bdist_dir):
for name in files:
path = os.path.join(base, name)
if name.endswith('.py'):
log.debug("Deleting %s", path)
os.unlink(path)
if base.endswith('__pycache__'):
path_old = path
pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
m = re.match(pattern, name)
path_new = os.path.join(
base, os.pardir, m.group('name') + '.pyc')
log.info(
"Renaming file from [%s] to [%s]"
% (path_old, path_new))
try:
os.remove(path_new)
except OSError:
pass
os.rename(path_old, path_new)
def zip_safe(self):
safe = getattr(self.distribution, 'zip_safe', None)
if safe is not None:
return safe
log.warn("zip_safe flag not set; analyzing archive contents...")
return analyze_egg(self.bdist_dir, self.stubs)
def gen_header(self):
epm = EntryPoint.parse_map(self.distribution.entry_points or '')
ep = epm.get('setuptools.installation', {}).get('eggsecutable')
if ep is None:
return 'w' # not an eggsecutable, do it the usual way.
warnings.warn(
"Eggsecutables are deprecated and will be removed in a future "
"version.",
SetuptoolsDeprecationWarning
)
if not ep.attrs or ep.extras:
raise DistutilsSetupError(
"eggsecutable entry point (%r) cannot have 'extras' "
"or refer to a module" % (ep,)
)
pyver = '{}.{}'.format(*sys.version_info)
pkg = ep.module_name
full = '.'.join(ep.attrs)
base = ep.attrs[0]
basename = os.path.basename(self.egg_output)
header = (
"#!/bin/sh\n"
'if [ `basename $0` = "%(basename)s" ]\n'
'then exec python%(pyver)s -c "'
"import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
"from %(pkg)s import %(base)s; sys.exit(%(full)s())"
'" "$@"\n'
'else\n'
' echo $0 is not the correct name for this egg file.\n'
' echo Please rename it back to %(basename)s and try again.\n'
' exec false\n'
'fi\n'
) % locals()
if not self.dry_run:
mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
f = open(self.egg_output, 'w')
f.write(header)
f.close()
return 'a'
def copy_metadata_to(self, target_dir):
"Copy metadata (egg info) to the target_dir"
# normalize the path (so that a forward-slash in egg_info will
# match using startswith below)
norm_egg_info = os.path.normpath(self.egg_info)
prefix = os.path.join(norm_egg_info, '')
for path in self.ei_cmd.filelist.files:
if path.startswith(prefix):
target = os.path.join(target_dir, path[len(prefix):])
ensure_directory(target)
self.copy_file(path, target)
def get_ext_outputs(self):
"""Get a list of relative paths to C extensions in the output distro"""
all_outputs = []
ext_outputs = []
paths = {self.bdist_dir: ''}
for base, dirs, files in sorted_walk(self.bdist_dir):
for filename in files:
if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
all_outputs.append(paths[base] + filename)
for filename in dirs:
paths[os.path.join(base, filename)] = (paths[base] +
filename + '/')
if self.distribution.has_ext_modules():
build_cmd = self.get_finalized_command('build_ext')
for ext in build_cmd.extensions:
if isinstance(ext, Library):
continue
fullname = build_cmd.get_ext_fullname(ext.name)
filename = build_cmd.get_ext_filename(fullname)
if not os.path.basename(filename).startswith('dl-'):
if os.path.exists(os.path.join(self.bdist_dir, filename)):
ext_outputs.append(filename)
return all_outputs, ext_outputs
NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
def walk_egg(egg_dir):
"""Walk an unpacked egg's contents, skipping the metadata directory"""
walker = sorted_walk(egg_dir)
base, dirs, files = next(walker)
if 'EGG-INFO' in dirs:
dirs.remove('EGG-INFO')
yield base, dirs, files
for bdf in walker:
yield bdf
def analyze_egg(egg_dir, stubs):
# check for existing flag in EGG-INFO
for flag, fn in safety_flags.items():
if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
return flag
if not can_scan():
return False
safe = True
for base, dirs, files in walk_egg(egg_dir):
for name in files:
if name.endswith('.py') or name.endswith('.pyw'):
continue
elif name.endswith('.pyc') or name.endswith('.pyo'):
# always scan, even if we already know we're not safe
safe = scan_module(egg_dir, base, name, stubs) and safe
return safe
def write_safety_flag(egg_dir, safe):
# Write or remove zip safety flag file(s)
for flag, fn in safety_flags.items():
fn = os.path.join(egg_dir, fn)
if os.path.exists(fn):
if safe is None or bool(safe) != flag:
os.unlink(fn)
elif safe is not None and bool(safe) == flag:
f = open(fn, 'wt')
f.write('\n')
f.close()
safety_flags = {
True: 'zip-safe',
False: 'not-zip-safe',
}
def scan_module(egg_dir, base, name, stubs):
"""Check whether module possibly uses unsafe-for-zipfile stuff"""
filename = os.path.join(base, name)
if filename[:-1] in stubs:
return True # Extension module
pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
if six.PY2:
skip = 8 # skip magic & date
elif sys.version_info < (3, 7):
skip = 12 # skip magic & date & file size
else:
skip = 16 # skip magic & reserved? & date & file size
f = open(filename, 'rb')
f.read(skip)
code = marshal.load(f)
f.close()
safe = True
symbols = dict.fromkeys(iter_symbols(code))
for bad in ['__file__', '__path__']:
if bad in symbols:
log.warn("%s: module references %s", module, bad)
safe = False
if 'inspect' in symbols:
for bad in [
            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
'getinnerframes', 'getouterframes', 'stack', 'trace'
]:
if bad in symbols:
log.warn("%s: module MAY be using inspect.%s", module, bad)
safe = False
return safe
def iter_symbols(code):
"""Yield names and strings used by `code` and its nested code objects"""
for name in code.co_names:
yield name
for const in code.co_consts:
if isinstance(const, six.string_types):
yield const
elif isinstance(const, CodeType):
for name in iter_symbols(const):
yield name
def can_scan():
if not sys.platform.startswith('java') and sys.platform != 'cli':
# CPython, PyPy, etc.
return True
log.warn("Unable to analyze compiled code on this platform.")
log.warn("Please ask the author to include a 'zip_safe'"
" setting (either True or False) in the package's setup.py")
# Attribute names of options for commands that might need to be convinced to
# install to the egg build directory
INSTALL_DIRECTORY_ATTRS = [
'install_lib', 'install_dir', 'install_data', 'install_base'
]
def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
mode='w'):
"""Create a zip file from all the files under 'base_dir'. The output
zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
Python module (if available) or the InfoZIP "zip" utility (if installed
and found on the default search path). If neither tool is available,
raises DistutilsExecError. Returns the name of the output zip file.
"""
import zipfile
mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
def visit(z, dirname, names):
for name in names:
path = os.path.normpath(os.path.join(dirname, name))
if os.path.isfile(path):
p = path[len(base_dir) + 1:]
if not dry_run:
z.write(path, p)
log.debug("adding '%s'", p)
compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
if not dry_run:
z = zipfile.ZipFile(zip_filename, mode, compression=compression)
for dirname, dirs, files in sorted_walk(base_dir):
visit(z, dirname, files)
z.close()
else:
for dirname, dirs, files in sorted_walk(base_dir):
visit(None, dirname, files)
return zip_filename
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/alias.py
|
from distutils.errors import DistutilsOptionError
from setuptools.extern.six.moves import map
from setuptools.command.setopt import edit_config, option_base, config_file
def shquote(arg):
"""Quote an argument for later parsing by shlex.split()"""
for c in '"', "'", "\\", "#":
if c in arg:
return repr(arg)
if arg.split() != [arg]:
return repr(arg)
return arg
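# For example (illustrative): shquote("pip install") returns "'pip install'"
# (the repr), while shquote("plain") comes back unchanged.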
class alias(option_base):
"""Define a shortcut that invokes one or more commands"""
description = "define a shortcut to invoke one or more commands"
command_consumes_arguments = True
user_options = [
('remove', 'r', 'remove (unset) the alias'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.args = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.remove and len(self.args) != 1:
raise DistutilsOptionError(
"Must specify exactly one argument (the alias name) when "
"using --remove"
)
def run(self):
aliases = self.distribution.get_option_dict('aliases')
if not self.args:
print("Command Aliases")
print("---------------")
for alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
elif len(self.args) == 1:
alias, = self.args
if self.remove:
command = None
elif alias in aliases:
print("setup.py alias", format_alias(alias, aliases))
return
else:
print("No alias definition found for %r" % alias)
return
else:
alias = self.args[0]
command = ' '.join(map(shquote, self.args[1:]))
edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
def format_alias(name, aliases):
source, command = aliases[name]
if source == config_file('global'):
source = '--global-config '
elif source == config_file('user'):
source = '--user-config '
elif source == config_file('local'):
source = ''
else:
source = '--filename=%r' % source
return source + name + ' ' + command
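# Illustrative command-line usage (hypothetical alias name and commands):
#
#     python setup.py alias daily clean bdist_egg   # define alias "daily"
#     python setup.py daily                         # expands to: clean bdist_egg
#     python setup.py alias --remove daily          # remove the alias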
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/py36compat.py
|
import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from setuptools.extern.six.moves import filter
class sdist_add_defaults:
"""
Mix-in providing forward-compatibility for functionality as found in
distutils on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils. Instead, override in the subclass.
"""
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist_add_defaults._cs_path_exists(__file__)
True
>>> sdist_add_defaults._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
def _add_defaults_optional(self):
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
def _add_defaults_c_libs(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
def _add_defaults_scripts(self):
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
if hasattr(sdist.sdist, '_add_defaults_standards'):
# disable the functionality already available upstream
class sdist_add_defaults: # noqa
pass
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/build_ext.py
|
import os
import sys
import itertools
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
from distutils.errors import DistutilsError
from distutils import log
from setuptools.extension import Library
from setuptools.extern import six
if six.PY2:
import imp
EXTENSION_SUFFIXES = [
s for s, _, tp in imp.get_suffixes() if tp == imp.C_EXTENSION]
else:
from importlib.machinery import EXTENSION_SUFFIXES
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
# Additionally, assert that the compiler module will load
# also. Ref #1229.
__import__('Cython.Compiler.Main')
except ImportError:
_build_ext = _du_build_ext
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS # noqa
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
# temporarily substitute the pyconfig.h variables so that distutils'
# 'customize_compiler' uses them before we build the shared libraries.
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
import dl
use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
except ImportError:
pass
def if_dl(s):
return s if have_rtld else ''
def get_abi3_suffix():
"""Return the file extension for an abi3-compliant Extension()"""
for suffix in EXTENSION_SUFFIXES:
if '.abi3' in suffix: # Unix
return suffix
elif suffix == '.pyd': # Windows
return suffix
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
use_abi3 = (
not six.PY2
and getattr(ext, 'py_limited_api')
and get_abi3_suffix()
)
if use_abi3:
so_ext = get_config_var('EXT_SUFFIX')
filename = filename[:-len(so_ext)]
filename = filename + get_abi3_suffix()
if isinstance(ext, Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
_customize_compiler_for_shlib(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources" + if_dl(", dl"),
" from importlib.machinery import ExtensionFileLoader",
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" ExtensionFileLoader(__name__,",
" __file__).load_module()",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
# Build shared libraries
#
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
# libraries=None, library_dirs=None, runtime_library_dirs=None,
# export_symbols=None, extra_preargs=None, extra_postargs=None,
# build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
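# --- Illustrative sketch (not part of setuptools) ---------------------------
# Why the static branch above strips a 'lib' prefix: on Unix-style compilers,
# library_filename('x') yields 'libx.a', so a basename such as 'libfoo' must
# be trimmed to 'foo' before create_static_lib() re-adds the prefix. The
# `_demo_` helper below is hypothetical and only shows the slicing involved.
def _demo_strip_lib_prefix(basename, compiler_prefix='lib'):
    if compiler_prefix and basename.startswith(compiler_prefix):
        return basename[len(compiler_prefix):]
    return basename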
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/easy_install.py
|
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://setuptools.readthedocs.io/en/latest/easy_install.html
"""
from glob import glob
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import (
DistutilsArgError, DistutilsOptionError,
DistutilsError, DistutilsPlatformError,
)
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from distutils import log, dir_util
from distutils.command.build_scripts import first_line_re
from distutils.spawn import find_executable
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import textwrap
import warnings
import site
import struct
import contextlib
import subprocess
import shlex
import io
from sysconfig import get_config_vars, get_path
from setuptools import SetuptoolsDeprecationWarning
from setuptools.extern import six
from setuptools.extern.six.moves import configparser, map
from setuptools import Command
from setuptools.sandbox import run_setup
from setuptools.py27compat import rmtree_safe
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import (
PackageIndex, parse_requirement_arg, URL_SCHEME,
)
from setuptools.command import bdist_egg, egg_info
from setuptools.wheel import Wheel
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
import pkg_resources
__metaclass__ = type
# Turn on PEP440Warnings
warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
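# Illustrative note (not part of setuptools): struct.calcsize("P") is the
# size of a C pointer in bytes: 8 on a 64-bit CPython build, 4 on a 32-bit
# one. The `_demo_` helper below is hypothetical.
def _demo_pointer_width_bits():
    return struct.calcsize("P") * 8  # 64 on a 64-bit interpreter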
def samefile(p1, p2):
"""
Determine if two paths reference the same file.
    Augments os.path.samefile to work on Windows and
    suppresses errors if either path doesn't exist.
"""
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
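# Illustrative sketch (not part of setuptools): when os.path.samefile is
# unavailable or either path is missing, samefile() falls back to comparing
# normalized, case-folded spellings. The `_demo_` helper is hypothetical.
def _demo_samefile_fallback():
    # On Windows both spellings normalize to the same case-folded path, so
    # the fallback comparison reports them as the same file; on POSIX the
    # comparison is case-sensitive and returns False.
    return samefile(r'C:\Temp\PKG.EGG', 'c:/temp/pkg.egg')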
if six.PY2:
def _to_bytes(s):
return s
def isascii(s):
try:
six.text_type(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_bytes(s):
return s.encode('utf8')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
def _one_liner(text):
return textwrap.dedent(text).strip().replace('\n', '; ')
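# Illustrative sketch (not part of setuptools): _one_liner() collapses an
# indented multi-line snippet into a single ';'-joined statement, the form
# needed for executable lines in .pth files. The `_demo_` name is
# hypothetical.
def _demo_one_liner():
    # Returns: "import sys; sys.stdout.write('hi')"
    return _one_liner("""
        import sys
        sys.stdout.write('hi')
        """)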
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=', 'S', "list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed"),
('user', None, "install in user site-package '%s'" % site.USER_SITE)
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version',
'user'
]
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
# the --user option seems to be an opt-in one,
# so the default should be False.
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
extant_blockers = (
filename for filename in blockers
if os.path.exists(filename) or os.path.islink(filename)
)
list(map(self._delete_path, extant_blockers))
def _delete_path(self, path):
log.info("Deleting %s", path)
if self.dry_run:
return
is_tree = os.path.isdir(path) and not os.path.islink(path)
remover = rmtree if is_tree else os.unlink
remover(path)
@staticmethod
def _render_version():
"""
Render the Setuptools version and installation details, then exit.
"""
ver = '{}.{}'.format(*sys.version_info)
dist = get_distribution('setuptools')
tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
print(tmpl.format(**locals()))
raise SystemExit()
def finalize_options(self):
self.version and self._render_version()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
            # Slicing the version string breaks for two-digit minor versions
            # (e.g. '3.10'), so derive these fields from sys.version_info.
            'py_version_short': '{}.{}'.format(*sys.version_info[:2]),
            'py_version_nodot': '{}{}'.format(*sys.version_info[:2]),
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
elif self.user:
log.warn("WARNING: The user site-packages directory is disabled.")
self._fix_install_dir_for_user_site()
self.expand_basedirs()
self.expand_dirs()
self._expand(
'install_dir', 'script_dir', 'build_directory',
'site_dirs',
)
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options(
'install_lib', ('install_dir', 'install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options(
'install_scripts', ('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
        # Should this be moved to the if statement below? It's not used
        # elsewhere.
        # Materialize the normalized paths as a set: a bare map() iterator
        # would be exhausted by the first membership test below.
        normpath = set(map(normalize_path, sys.path))
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in
self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d + " (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable:
self.check_site_dir()
self.index_url = self.index_url or "https://pypi.org/simple/"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path=self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path + sys.path)
if self.find_links is not None:
if isinstance(self.find_links, six.string_types):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path + sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize', 'optimize'))
if not isinstance(self.optimize, int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2):
raise ValueError
except ValueError as e:
raise DistutilsOptionError(
"--optimize must be 0, 1, or 2"
) from e
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _fix_install_dir_for_user_site(self):
"""
Fix the install_dir if "--user" was used.
"""
if not self.user or not site.ENABLE_USER_SITE:
return
self.create_home_path()
if self.install_userbase is None:
msg = "User base directory is not specified"
raise DistutilsPlatformError(msg)
self.install_base = self.install_platbase = self.install_userbase
scheme_name = os.name.replace('posix', 'unix') + '_user'
self.select_scheme(scheme_name)
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
dirs = [
'install_purelib',
'install_platlib',
'install_lib',
'install_headers',
'install_scripts',
'install_data',
]
self._expand_attrs(dirs)
def run(self, show_deprecation=True):
if show_deprecation:
self.announce(
"WARNING: The easy_install command is deprecated "
"and will be removed in a future version.",
log.WARN,
)
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except Exception:
pid = random.randint(0, sys.maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir, 'easy-install.pth')
if not os.path.exists(instdir):
try:
os.makedirs(instdir)
except (OSError, IOError):
self.cant_write_to_target()
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname() + '.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists:
os.unlink(testfile)
open(testfile, 'w').close()
os.unlink(testfile)
except (OSError, IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir with easy_install
pythonpath = os.environ.get('PYTHONPATH', '')
log.warn(self.__no_default_msg, self.install_dir, pythonpath)
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
if self.multi_version and not os.path.exists(pth_file):
self.pth_file = None # don't create a .pth file
self.install_dir = instdir
__cant_write_msg = textwrap.dedent("""
can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
""").lstrip() # noqa
__not_exists_id = textwrap.dedent("""
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
""").lstrip() # noqa
__access_msg = textwrap.dedent("""
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://setuptools.readthedocs.io/en/latest/easy_install.html
Please make the appropriate changes for your system and try again.
""").lstrip() # noqa
def cant_write_to_target(self):
msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += '\n' + self.__not_exists_id
else:
msg += '\n' + self.__access_msg
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname() + ".pth"
ok_file = pth_file + '.ok'
ok_exists = os.path.exists(ok_file)
tmpl = _one_liner("""
import os
f = open({ok_file!r}, 'w')
f.write('OK')
f.close()
""") + '\n'
try:
if ok_exists:
os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
os.makedirs(dirname, exist_ok=True)
f = open(pth_file, 'w')
except (OSError, IOError):
self.cant_write_to_target()
else:
try:
f.write(tmpl.format(**locals()))
f.close()
f = None
executable = sys.executable
if os.name == 'nt':
dirname, basename = os.path.split(executable)
alt = os.path.join(dirname, 'pythonw.exe')
use_alt = (
basename.lower() == 'python.exe' and
os.path.exists(alt)
)
if use_alt:
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable, '-E', '-c', 'pass'], 0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/' + script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base, filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self, spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
@contextlib.contextmanager
def _tmpdir(self):
tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
try:
# cast to str as workaround for #709 and #710 and #712
yield str(tmpdir)
finally:
os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir))
def easy_install(self, spec, deps=False):
with self._tmpdir() as tmpdir:
if not isinstance(spec, Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
dl = self.package_index.download(spec, tmpdir)
return self.install_item(None, dl, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable,
not self.always_copy, self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg += " (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence == DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        # Installation is also needed if the file is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location == download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
if dist in self.local_index[dist.key]:
self.local_index.remove(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = Requirement(str(distreq))
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound as e:
raise DistutilsError(str(e)) from e
except VersionConflict as e:
raise DistutilsError(e.report()) from e
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = (
"%r already exists in %s; build directory %s will not be kept"
)
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename) == setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents) == 1:
dist_filename = os.path.join(setup_base, contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if self.exclude_scripts:
return
for args in ScriptWriter.best().get_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
if is_script:
body = self._load_template(dev_path) % locals()
script_text = ScriptWriter.get_header(script_text) + body
self.write_script(script_name, _to_bytes(script_text), 'b')
@staticmethod
def _load_template(dev_path):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
"""
# See https://github.com/pypa/setuptools/issues/134 for info
# on script file naming and downstream issues with SVR4
name = 'script.tmpl'
if dev_path:
name = name.replace('.tmpl', ' (dev).tmpl')
raw_bytes = resource_string('setuptools', name)
return raw_bytes.decode('utf-8')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir, x) for x in blockers]
)
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
if self.dry_run:
return
mask = current_umask()
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
with open(target, "w" + mode) as f:
f.write(contents)
chmod(target, 0o777 - mask)
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.whl'):
return [self.install_wheel(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" %
os.path.abspath(dist_filename)
)
if len(setups) > 1:
raise DistutilsError(
"Multiple setup scripts in %s" %
os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path, os.path.join(egg_path,
'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path, metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(
self.install_dir,
os.path.basename(egg_path),
)
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
new_dist_is_zipped = False
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f, m = self.unpack_and_compile, "Extracting"
else:
new_dist_is_zipped = True
if egg_path.startswith(tmpdir):
f, m = shutil.move, "Moving"
else:
f, m = shutil.copy2, "Copying"
self.execute(
f,
(egg_path, destination),
(m + " %s to %s") % (
os.path.basename(egg_path),
os.path.dirname(destination)
),
)
update_dist_caches(
destination,
fix_zipimporter_caches=new_dist_is_zipped,
)
except Exception:
update_dist_caches(destination, fix_zipimporter_caches=False)
raise
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata', 'name'),
version=cfg.get('metadata', 'version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
dist.location = egg_path
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
            with open(pkg_inf, 'w') as f:
                f.write('Metadata-Version: 1.0\n')
                for k, v in cfg.items('metadata'):
                    if k != 'target_version':
                        f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
script_dir = os.path.join(_egg_info, 'scripts')
# delete entry-point scripts to avoid duping
self.delete_blockers([
os.path.join(script_dir, args[0])
for args in ScriptWriter.get_args(dist)
])
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src, dst):
s = src.lower()
for old, new in prefixes:
if s.startswith(old):
src = new + src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old != 'SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(
os.path.join(egg_tmp, 'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level', 'native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
if not os.path.exists(txt):
                    with open(txt, 'w') as f:
                        f.write('\n'.join(locals()[name]) + '\n')
def install_wheel(self, wheel_path, tmpdir):
wheel = Wheel(wheel_path)
assert wheel.is_compatible()
destination = os.path.join(self.install_dir, wheel.egg_name())
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(
os.unlink,
(destination,),
"Removing " + destination,
)
try:
self.execute(
wheel.install_as_egg,
(destination,),
("Installing %s to %s") % (
os.path.basename(wheel_path),
os.path.dirname(destination)
),
)
finally:
update_dist_caches(destination, fix_zipimporter_caches=False)
self.add_output(destination)
return self.egg_distribution(destination)
__mv_warning = textwrap.dedent("""
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
""").lstrip() # noqa
__id_warning = textwrap.dedent("""
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
""") # noqa
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += '\n' + self.__mv_warning
if self.install_dir not in map(normalize_path, sys.path):
msg += '\n' + self.__id_warning
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
__editable_msg = textwrap.dedent("""
Extracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""").lstrip() # noqa
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return '\n' + self.__editable_msg % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose > 2:
v = 'v' * (self.verbose - 1)
args.insert(0, '-' + v)
elif self.verbose < 2:
args.insert(0, '-q')
if self.dry_run:
args.insert(0, '-n')
log.info(
"Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit as v:
raise DistutilsError(
"Setup script exited with %s" % (v.args[0],)
) from v
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives:
continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist,
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key == 'setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir, 'setuptools.pth')
if os.path.islink(filename):
os.unlink(filename)
            with open(filename, 'wt') as f:
                f.write(self.pth_file.make_relative(dist.location) + '\n')
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src, dst)
            return (dst or None) if not self.dry_run else None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
chmod(f, mode)
def byte_compile(self, to_compile):
if sys.dont_write_bytecode:
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run,
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
__no_default_msg = textwrap.dedent("""
bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again.
""").strip()
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in six.iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
INSTALL_SCHEMES = dict(
posix=dict(
install_dir='$base/lib/python$py_version_short/site-packages',
script_dir='$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir='$base/Lib/site-packages',
script_dir='$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
for attr, val in scheme.items():
if getattr(self, attr, None) is None:
setattr(self, attr, val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def _pythonpath():
items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
return filter(None, items)
def get_site_dirs():
"""
Return a list of 'site' dirs
"""
sitedirs = []
# start with PYTHONPATH
sitedirs.extend(_pythonpath())
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([
os.path.join(
prefix,
"lib",
"python{}.{}".format(*sys.version_info),
"site-packages",
),
os.path.join(prefix, "lib", "site-python"),
])
else:
sitedirs.extend([
prefix,
os.path.join(prefix, "lib", "site-packages"),
])
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
home_sp = os.path.join(
home,
'Library',
'Python',
'{}.{}'.format(*sys.version_info),
'site-packages',
)
sitedirs.append(home_sp)
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs:
sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
try:
sitedirs.extend(site.getsitepackages())
except AttributeError:
pass
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
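# Illustrative sketch (not part of setuptools): get_site_dirs() merges
# PYTHONPATH entries, prefix-derived site-packages locations, sysconfig's
# purelib/platlib, and site.getsitepackages(), all run through
# normalize_path(). The `_demo_` wrapper below is hypothetical.
def _demo_install_dir_is_site_dir(install_dir):
    return normalize_path(install_dir) in get_site_dirs()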
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth', 'setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
            with open(os.path.join(dirname, name)) as f:
                lines = list(yield_lines(f))
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
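# Illustrative sketch (not part of setuptools): expand_paths() visits the
# given directories plus any extra directories named in third-party .pth
# files, yielding each (dirname, contents) pair exactly once. The `_demo_`
# helper is hypothetical and collects only the .pth-referenced extras.
def _demo_list_pth_referenced_dirs(inputs):
    direct = set(map(normalize_path, inputs))
    return [d for d, _files in expand_paths(inputs) if d not in direct]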
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a configparser.RawConfigParser, or None
"""
f = open(dist_filename, 'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended - 12)
tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended - (12 + cfglen))
init = {'version': '', 'target_version': ''}
cfg = configparser.RawConfigParser(init)
try:
part = f.read(cfglen)
# Read up to the first null byte.
config = part.split(b'\0', 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(six.StringIO(config))
except configparser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
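# Illustrative sketch (not part of setuptools): the 12-byte trailer parsed
# above sits just before the zip data appended to a bdist_wininst .exe and
# holds three little-endian 32-bit ints: a magic tag, the length of the
# embedded setup.cfg text, and the bitmap length. The `_demo_` helper is
# hypothetical and simply packs such a trailer.
def _demo_pack_wininst_trailer(cfglen, bmlen, tag=0x1234567A):
    return struct.pack("<iii", tag, cfglen, bmlen)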
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''),
('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts) == 3 and parts[2] == 'PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB', 'PLATLIB'):
contents = z.read(name)
if not six.PY2:
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\', '/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
finally:
z.close()
prefixes = [(x.lower(), y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
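# Illustrative sketch (not part of setuptools): applying the first matching
# (prefix, replacement) pair maps a wininst archive member onto its
# egg-relative path, e.g. 'SCRIPTS/foo.py' -> 'EGG-INFO/scripts/foo.py'.
# The `_demo_` helper is hypothetical; prefixes come reverse-sorted so the
# more specific spellings win.
def _demo_translate_exe_member(name, prefixes):
    lowered = name.lower()
    for old, new in prefixes:
        if lowered.startswith(old):
            return new + name[len(old):]
    return None  # member is not installable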
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename, 'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir, path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
rel_paths = list(map(self.make_relative, self.paths))
if rel_paths:
log.debug("Saving %s", self.filename)
lines = self._wrap_lines(rel_paths)
data = '\n'.join(lines) + '\n'
if os.path.islink(self.filename):
os.unlink(self.filename)
with open(self.filename, 'wt') as f:
f.write(data)
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
@staticmethod
def _wrap_lines(lines):
return lines
def add(self, dist):
"""Add `dist` to the distribution map"""
new_path = (
dist.location not in self.paths and (
dist.location not in self.sitedirs or
# account for '.' being in PYTHONPATH
dist.location == os.getcwd()
)
)
if new_path:
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self, path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
        sep = '/' if os.altsep == '/' else os.sep
while len(npath) >= baselen:
if npath == self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
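# Illustrative sketch (not part of setuptools): make_relative() rewrites a
# path that lives under the .pth file's own directory into a relative
# spelling with a leading '.' segment (e.g. '/site/pkgs/foo' becomes
# './pkgs/foo' for a .pth file in /site); anything outside that base is
# returned unchanged. The `_demo_` wrapper is hypothetical.
def _demo_make_relative(pth_filename, target):
    return PthDistributions(pth_filename).make_relative(target)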
class RewritePthDistributions(PthDistributions):
@classmethod
def _wrap_lines(cls, lines):
yield cls.prelude
for line in lines:
yield line
yield cls.postlude
prelude = _one_liner("""
import sys
sys.__plen = len(sys.path)
""")
postlude = _one_liner("""
import sys
new = sys.path[sys.__plen:]
del sys.path[sys.__plen:]
p = getattr(sys, '__egginsert', 0)
sys.path[p:p] = new
sys.__egginsert = p + len(new)
""")
if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
PthDistributions = RewritePthDistributions
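# Illustrative note (not part of setuptools): the prelude/postlude pair
# brackets the egg entries so that when Python executes the rewritten .pth
# file at startup, the paths added in between are spliced back to a fixed
# insertion point (sys.__egginsert) rather than staying appended at the end
# of sys.path. The `_demo_` helper is hypothetical.
def _demo_rewritten_pth_lines(paths):
    return list(RewritePthDistributions._wrap_lines(paths))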
def _first_line_re():
"""
Return a regular expression based on first_line_re suitable for matching
strings.
"""
if isinstance(first_line_re.pattern, str):
return first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
return re.compile(first_line_re.pattern.decode())
def auto_chmod(func, arg, exc):
if func in [os.unlink, os.remove] and os.name == 'nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
    et, ev, _ = sys.exc_info()
    # Exception instances are not subscriptable on Python 3, so rebuild the
    # exception with the extra context appended instead of indexing into it.
    six.reraise(et, et("%s %s %s" % (ev, func, arg)))
def update_dist_caches(dist_path, fix_zipimporter_caches):
"""
Fix any globally cached `dist_path` related data
`dist_path` should be a path of a newly installed egg distribution (zipped
or unzipped).
sys.path_importer_cache contains finder objects that have been cached when
importing data from the original distribution. Any such finders need to be
cleared since the replacement distribution might be packaged differently,
e.g. a zipped egg distribution might get replaced with an unzipped egg
folder or vice versa. Having the old finders cached may then cause Python
to attempt loading modules from the replacement distribution using an
incorrect loader.
zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution are left behind, they can fail to load modules from
the replacement distribution. E.g. if an old zipimport.zipimporter instance
is used to load data from a new zipped egg archive, it may cause the
operation to attempt to locate the requested data in the wrong location -
one indicated by the original distribution's zip archive directory
information. Such an operation may then fail outright, e.g. report having
    read a 'bad local file header', or, even worse, it may fail silently and
return invalid data.
zipimport._zip_directory_cache contains cached zip archive directory
information for all existing zipimport.zipimporter instances and all such
instances connected to the same archive share the same cached directory
information.
If asked, and the underlying Python implementation allows it, we can fix
all existing zipimport.zipimporter instances instead of having to track
them down and remove them one by one, by updating their shared cached zip
archive directory information. This, of course, assumes that the
replacement distribution is packaged as a zipped egg.
If not asked to fix existing zipimport.zipimporter instances, we still do
our best to clear any remaining zipimport.zipimporter related cached data
that might somehow later get used when attempting to load data from the new
distribution and thus cause such load operations to fail. Note that when
    tracking down such remaining stale data, we cannot catch every conceivable
usage from here, and we clear only those that we know of and have found to
cause problems if left alive. Any remaining caches should be updated by
whomever is in charge of maintaining them, i.e. they should be ready to
handle us replacing their zip archives with new distributions at runtime.
"""
# There are several other known sources of stale zipimport.zipimporter
# instances that we do not clear here, but might if ever given a reason to
# do so:
# * Global setuptools pkg_resources.working_set (a.k.a. 'master working
# set') may contain distributions which may in turn contain their
# zipimport.zipimporter loaders.
# * Several zipimport.zipimporter loaders held by local variables further
# up the function call stack when running the setuptools installation.
# * Already loaded modules may have their __loader__ attribute set to the
# exact loader instance used when importing them. Python 3.4 docs state
# that this information is intended mostly for introspection and so is
# not expected to cause us problems.
normalized_path = normalize_path(dist_path)
_uncache(normalized_path, sys.path_importer_cache)
if fix_zipimporter_caches:
_replace_zip_directory_cache_data(normalized_path)
else:
# Here, even though we do not want to fix existing and now stale
# zipimporter cache information, we still want to remove it. Related to
# Python's zip archive directory information cache, we clear each of
# its stale entries in two phases:
# 1. Clear the entry so attempting to access zip archive information
# via any existing stale zipimport.zipimporter instances fails.
# 2. Remove the entry from the cache so any newly constructed
# zipimport.zipimporter instances do not end up using old stale
# zip archive directory information.
# This whole stale data removal step does not seem strictly necessary,
# but has been left in because it was done before we started replacing
# the zip archive directory information cache content if possible, and
# there are no relevant unit tests that we can depend on to tell us if
# this is really needed.
_remove_and_clear_zip_directory_cache_data(normalized_path)
def _collect_zipimporter_cache_entries(normalized_path, cache):
"""
Return zipimporter cache entry keys related to a given normalized path.
Alternative path spellings (e.g. those using different character case or
those using alternative path separators) related to the same path are
included. Any sub-path entries are included as well, i.e. those
corresponding to zip archives embedded in other zip archives.
"""
result = []
prefix_len = len(normalized_path)
for p in cache:
np = normalize_path(p)
if (np.startswith(normalized_path) and
np[prefix_len:prefix_len + 1] in (os.sep, '')):
result.append(p)
return result
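# Illustrative sketch (not part of setuptools): entries are matched after
# normalization, so alternate spellings of an egg's path and any archives
# embedded inside it are all collected. The `_demo_` helper and its sample
# cache are hypothetical.
def _demo_collect_entries():
    egg = normalize_path('/eggs/foo.egg')
    cache = {egg: object(), egg + os.sep + 'inner.zip': object()}
    # Returns both keys: the egg itself and the archive embedded in it.
    return _collect_zipimporter_cache_entries(egg, cache)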
def _update_zipimporter_cache(normalized_path, cache, updater=None):
"""
Update zipimporter cache data for a given normalized path.
Any sub-path entries are processed as well, i.e. those corresponding to zip
archives embedded in other zip archives.
    The given updater is a callable taking a cache entry key and the original
    entry (already removed from the cache); it is expected to update the
    entry and may return a new one to be inserted in its place.
Returning None indicates that the entry should not be replaced with a new
one. If no updater is given, the cache entries are simply removed without
any additional processing, the same as if the updater simply returned None.
"""
for p in _collect_zipimporter_cache_entries(normalized_path, cache):
# N.B. pypy's custom zipimport._zip_directory_cache implementation does
# not support the complete dict interface:
# * Does not support item assignment, thus not allowing this function
# to be used only for removing existing cache entries.
# * Does not support the dict.pop() method, forcing us to use the
# get/del patterns instead. For more detailed information see the
# following links:
# https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
# http://bit.ly/2h9itJX
old_entry = cache[p]
del cache[p]
new_entry = updater and updater(p, old_entry)
if new_entry is not None:
cache[p] = new_entry
def _uncache(normalized_path, cache):
_update_zipimporter_cache(normalized_path, cache)
def _remove_and_clear_zip_directory_cache_data(normalized_path):
def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
old_entry.clear()
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=clear_and_remove_cached_zip_archive_directory_data)
# PyPy Python implementation does not allow directly writing to the
# zipimport._zip_directory_cache and so prevents us from attempting to correct
# its content. The best we can do there is clear the problematic cache content
# and have PyPy repopulate it as needed. The downside is that if there are any
# stale zipimport.zipimporter instances laying around, attempting to use them
# will fail due to not having its zip archive directory information available
# instead of being automatically corrected to use the new correct zip archive
# directory information.
if '__pypy__' in sys.builtin_module_names:
_replace_zip_directory_cache_data = \
_remove_and_clear_zip_directory_cache_data
else:
def _replace_zip_directory_cache_data(normalized_path):
def replace_cached_zip_archive_directory_data(path, old_entry):
# N.B. In theory, we could load the zip directory information just
# once for all updated path spellings, and then copy it locally and
# update its contained path strings to contain the correct
# spelling, but that seems like a way too invasive move (this cache
# structure is not officially documented anywhere and could in
# theory change with new Python releases) for no significant
# benefit.
old_entry.clear()
zipimport.zipimporter(path)
old_entry.update(zipimport._zip_directory_cache[path])
return old_entry
_update_zipimporter_cache(
normalized_path, zipimport._zip_directory_cache,
updater=replace_cached_zip_archive_directory_data)
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
with io.open(executable, encoding='latin-1') as fp:
magic = fp.read(2)
except (OSError, IOError):
return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
return subprocess.list2cmdline([arg])
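# Hedged example: because of the embedded space, subprocess.list2cmdline
# quotes the argument, so nt_quote_arg('C:\\Program Files\\python.exe')
# returns '"C:\\Program Files\\python.exe"'.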
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
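# Hedged examples (illustrative inputs only):
#   is_python_script("#!/usr/bin/env python\nprint('hi')\n", 'run')  # True
#   is_python_script("#!/bin/sh\necho hi\n", 'run')                  # False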
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args):
pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error as e:
log.debug("chmod failed: %s", e)
class CommandSpec(list):
"""
A command spec for a #! header, specified as a list of arguments akin to
those passed to Popen.
"""
options = []
split_args = dict()
@classmethod
def best(cls):
"""
Choose the best CommandSpec class based on environmental conditions.
"""
return cls
@classmethod
def _sys_executable(cls):
_default = os.path.normpath(sys.executable)
return os.environ.get('__PYVENV_LAUNCHER__', _default)
@classmethod
def from_param(cls, param):
"""
Construct a CommandSpec from a parameter to build_scripts, which may
be None.
"""
if isinstance(param, cls):
return param
if isinstance(param, list):
return cls(param)
if param is None:
return cls.from_environment()
# otherwise, assume it's a string.
return cls.from_string(param)
@classmethod
def from_environment(cls):
return cls([cls._sys_executable()])
@classmethod
def from_string(cls, string):
"""
Construct a command spec from a simple string representing a command
line parseable by shlex.split.
"""
items = shlex.split(string, **cls.split_args)
return cls(items)
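# Hedged example: shlex-based parsing keeps quoted executables intact, so
# CommandSpec.from_string('"/usr/local/bin/python" -u') yields a spec
# equivalent to ['/usr/local/bin/python', '-u'] (POSIX splitting assumed).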
def install_options(self, script_text):
self.options = shlex.split(self._extract_options(script_text))
cmdline = subprocess.list2cmdline(self)
if not isascii(cmdline):
self.options[:0] = ['-x']
@staticmethod
def _extract_options(orig_script):
"""
Extract any options from the first line of the script.
"""
first = (orig_script + '\n').splitlines()[0]
match = _first_line_re().match(first)
options = match.group(1) or '' if match else ''
return options.strip()
def as_header(self):
return self._render(self + list(self.options))
@staticmethod
def _strip_quotes(item):
_QUOTES = '"\''
for q in _QUOTES:
if item.startswith(q) and item.endswith(q):
return item[1:-1]
return item
@staticmethod
def _render(items):
cmdline = subprocess.list2cmdline(
CommandSpec._strip_quotes(item.strip()) for item in items)
return '#!' + cmdline + '\n'
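# Hedged example: for a spec of ['/usr/bin/python'] with no extra options,
# as_header() renders the shebang line '#!/usr/bin/python\n'.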
# For pbr compat; will be removed in a future version.
sys_executable = CommandSpec._sys_executable()
class WindowsCommandSpec(CommandSpec):
split_args = dict(posix=False)
class ScriptWriter:
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent(r"""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
import re
import sys
# for compatibility with easy_install; see #2198
__requires__ = %(spec)r
try:
from importlib.metadata import distribution
except ImportError:
try:
from importlib_metadata import distribution
except ImportError:
from pkg_resources import load_entry_point
def importlib_load_entry_point(spec, group, name):
dist_name, _, _ = spec.partition('==')
matches = (
entry_point
for entry_point in distribution(dist_name).entry_points
if entry_point.group == group and entry_point.name == name
)
return next(matches).load()
globals().setdefault('load_entry_point', importlib_load_entry_point)
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
""").lstrip()
command_spec_class = CommandSpec
@classmethod
def get_script_args(cls, dist, executable=None, wininst=False):
# for backward compatibility
warnings.warn("Use get_args", EasyInstallDeprecationWarning)
writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
header = cls.get_script_header("", executable, wininst)
return writer.get_args(dist, header)
@classmethod
def get_script_header(cls, script_text, executable=None, wininst=False):
# for backward compatibility
warnings.warn(
"Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
if wininst:
executable = "python.exe"
return cls.get_header(script_text, executable)
@classmethod
def get_args(cls, dist, header=None):
"""
Yield write_script() argument tuples for a distribution's
console_scripts and gui_scripts entry points.
"""
if header is None:
header = cls.get_header()
spec = str(dist.as_requirement())
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
cls._ensure_safe_name(name)
script_text = cls.template % locals()
args = cls._get_script_args(type_, name, header, script_text)
for res in args:
yield res
@staticmethod
def _ensure_safe_name(name):
"""
Prevent paths in *_scripts entry point names.
"""
has_path_sep = re.search(r'[\\/]', name)
if has_path_sep:
raise ValueError("Path separators not allowed in script names")
@classmethod
def get_writer(cls, force_windows):
# for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
return WindowsScriptWriter.best() if force_windows else cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter for this environment.
"""
if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
return WindowsScriptWriter.best()
else:
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header + script_text)
@classmethod
def get_header(cls, script_text="", executable=None):
"""Create a #! line, getting options (if any) from script_text"""
cmd = cls.command_spec_class.best().from_param(executable)
cmd.install_options(script_text)
return cmd.as_header()
class WindowsScriptWriter(ScriptWriter):
command_spec_class = WindowsCommandSpec
@classmethod
def get_writer(cls):
# for backward compatibility
warnings.warn("Use best", EasyInstallDeprecationWarning)
return cls.best()
@classmethod
def best(cls):
"""
Select the best ScriptWriter suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
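# Hedged example: setting SETUPTOOLS_LAUNCHER=natural selects plain script
# generation (this class), while the default 'executable' value selects
# WindowsExecutableLauncherWriter and its .exe launcher stubs.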
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
msg = (
"{ext} not listed in PATHEXT; scripts will not be "
"recognized as executables."
).format(**locals())
warnings.warn(msg, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield name + ext, header + script_text, 't', blockers
@classmethod
def _adjust_header(cls, type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
return new_header if cls._use_header(new_header) else orig_header
@staticmethod
def _use_header(new_header):
"""
Should _adjust_header use the replaced header?
On non-Windows systems, always use. On
Windows systems, only use the replaced header if it resolves
to an executable on the system.
"""
clean_header = new_header[2:-1].strip('"')
return sys.platform != 'win32' or find_executable(clean_header)
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_ == 'gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py', '.pyc', '.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name + x for x in old]
yield (name + ext, hdr + script_text, 't', blockers)
yield (
name + '.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
get_script_header = ScriptWriter.get_script_header
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
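# Hedged example: on a 64-bit interpreter, get_win_launcher('cli') loads
# the 'cli-64.exe' resource bundled with setuptools; a 32-bit interpreter
# gets 'cli-32.exe'.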
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if six.PY2:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
return shutil.rmtree(path, ignore_errors, onerror)
def current_umask():
tmp = os.umask(0o022)
os.umask(tmp)
return tmp
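# Hedged example: with a process umask of 0o022, current_umask() returns
# 0o022 without changing it, so write_script's chmod(target, 0o777 - mask)
# ends up applying mode 0o755.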
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self, *args, **kw):
with _patch_usage():
Distribution._show_help(self, *args, **kw)
if argv is None:
argv = sys.argv[1:]
with _patch_usage():
setup(
script_args=['-q', 'easy_install', '-v'] + argv,
script_name=sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands,
**kw
)
@contextlib.contextmanager
def _patch_usage():
import distutils.core
USAGE = textwrap.dedent("""
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
""").lstrip()
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
saved = distutils.core.gen_usage
distutils.core.gen_usage = gen_usage
try:
yield
finally:
distutils.core.gen_usage = saved
class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
"""
Warning for EasyInstall deprecations, bypassing suppression.
"""
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/launcher manifest.xml
|
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%(name)s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/install_scripts.py
|
from distutils import log
import distutils.command.install_scripts as orig
import os
import sys
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
try:
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
except ImportError:
is_wininst = False
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/upload.py
|
from distutils import log
from distutils.command import upload as orig
from setuptools.errors import RemovedCommandError
class upload(orig.upload):
"""Formerly used to upload packages to PyPI."""
def run(self):
msg = (
"The upload command has been removed, use twine to upload "
+ "instead (https://pypi.org/p/twine)"
)
self.announce("ERROR: " + msg, log.ERROR)
raise RemovedCommandError(msg)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/register.py
|
from distutils import log
import distutils.command.register as orig
from setuptools.errors import RemovedCommandError
class register(orig.register):
"""Formerly used to register packages on PyPI."""
def run(self):
msg = (
"The register command has been removed, use twine to upload "
+ "instead (https://pypi.org/p/twine)"
)
self.announce("ERROR: " + msg, log.ERROR)
raise RemovedCommandError(msg)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/bdist_wininst.py
|
import distutils.command.bdist_wininst as orig
import warnings
from setuptools import SetuptoolsDeprecationWarning
class bdist_wininst(orig.bdist_wininst):
def reinitialize_command(self, command, reinit_subcommands=0):
"""
Supplement reinitialize_command to work around
http://bugs.python.org/issue20819
"""
cmd = self.distribution.reinitialize_command(
command, reinit_subcommands)
if command in ('install', 'install_lib'):
cmd.install_lib = None
return cmd
def run(self):
warnings.warn(
"bdist_wininst is deprecated and will be removed in a future "
"version. Use bdist_wheel (wheel packages) instead.",
SetuptoolsDeprecationWarning
)
self._is_running = True
try:
orig.bdist_wininst.run(self)
finally:
self._is_running = False
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/dist_info.py
|
"""
Create a dist_info directory
As defined in the wheel specification
"""
import os
from distutils.core import Command
from distutils import log
class dist_info(Command):
description = 'create a .dist-info directory'
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
]
def initialize_options(self):
self.egg_base = None
def finalize_options(self):
pass
def run(self):
egg_info = self.get_finalized_command('egg_info')
egg_info.egg_base = self.egg_base
egg_info.finalize_options()
egg_info.run()
dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info'
log.info("creating '{}'".format(os.path.abspath(dist_info_dir)))
bdist_wheel = self.get_finalized_command('bdist_wheel')
bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/install_lib.py
|
import os
import sys
from itertools import product, starmap
import distutils.command.install_lib as orig
class install_lib(orig.install_lib):
"""Don't add compiled flags to filenames of non-Python files"""
def run(self):
self.build()
outfiles = self.install()
if outfiles is not None:
# always compile, in case we have any extension stubs to deal with
self.byte_compile(outfiles)
def get_exclusions(self):
"""
Return a collections.Sized collections.Container of paths to be
excluded for single_version_externally_managed installations.
"""
all_packages = (
pkg
for ns_pkg in self._get_SVEM_NSPs()
for pkg in self._all_packages(ns_pkg)
)
excl_specs = product(all_packages, self._gen_exclusion_paths())
return set(starmap(self._exclude_pkg_path, excl_specs))
def _exclude_pkg_path(self, pkg, exclusion_path):
"""
Given a package name and exclusion path within that package,
compute the full exclusion path.
"""
parts = pkg.split('.') + [exclusion_path]
return os.path.join(self.install_dir, *parts)
@staticmethod
def _all_packages(pkg_name):
"""
>>> list(install_lib._all_packages('foo.bar.baz'))
['foo.bar.baz', 'foo.bar', 'foo']
"""
while pkg_name:
yield pkg_name
pkg_name, sep, child = pkg_name.rpartition('.')
def _get_SVEM_NSPs(self):
"""
Get namespace packages (list) but only for
single_version_externally_managed installations and empty otherwise.
"""
# TODO: is it necessary to short-circuit here? i.e. what's the cost
# if get_finalized_command is called even when namespace_packages is
# False?
if not self.distribution.namespace_packages:
return []
install_cmd = self.get_finalized_command('install')
svem = install_cmd.single_version_externally_managed
return self.distribution.namespace_packages if svem else []
@staticmethod
def _gen_exclusion_paths():
"""
Generate file paths to be excluded for namespace packages (bytecode
cache files).
"""
# always exclude the package module itself
yield '__init__.py'
yield '__init__.pyc'
yield '__init__.pyo'
if not hasattr(sys, 'implementation'):
return
base = os.path.join(
'__pycache__', '__init__.' + sys.implementation.cache_tag)
yield base + '.pyc'
yield base + '.pyo'
yield base + '.opt-1.pyc'
yield base + '.opt-2.pyc'
def copy_tree(
self, infile, outfile,
preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
):
assert preserve_mode and preserve_times and not preserve_symlinks
exclude = self.get_exclusions()
if not exclude:
return orig.install_lib.copy_tree(self, infile, outfile)
# Exclude namespace package __init__.py* files from the output
from setuptools.archive_util import unpack_directory
from distutils import log
outfiles = []
def pf(src, dst):
if dst in exclude:
log.warn("Skipping installation of %s (namespace package)",
dst)
return False
log.info("copying %s -> %s", src, os.path.dirname(dst))
outfiles.append(dst)
return dst
unpack_directory(infile, outfile, pf)
return outfiles
def get_outputs(self):
outputs = orig.install_lib.get_outputs(self)
exclude = self.get_exclusions()
if exclude:
return [f for f in outputs if f not in exclude]
return outputs
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/upload_docs.py
|
# -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
errors = 'strict' if six.PY2 else 'surrogateescape'
return s.encode('utf-8', errors)
class upload_docs(upload):
# override the default repository as upload_docs isn't
# supported by Warehouse (and won't be).
DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
description = 'Upload documentation to PyPI'
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server'),
('upload-dir=', None, 'directory to upload'),
]
boolean_options = upload.boolean_options
def has_sphinx(self):
if self.upload_dir is None:
for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
return True
sub_commands = [('build_sphinx', has_sphinx)]
def initialize_options(self):
upload.initialize_options(self)
self.upload_dir = None
self.target_dir = None
def finalize_options(self):
upload.finalize_options(self)
if self.upload_dir is None:
if self.has_sphinx():
build_sphinx = self.get_finalized_command('build_sphinx')
self.target_dir = build_sphinx.builder_target_dir
else:
build = self.get_finalized_command('build')
self.target_dir = os.path.join(build.build_base, 'docs')
else:
self.ensure_dirname('upload_dir')
self.target_dir = self.upload_dir
if 'pypi.python.org' in self.repository:
log.warn("Upload_docs command is deprecated. Use RTD instead.")
self.announce('Using upload directory %s' % self.target_dir)
def create_zipfile(self, filename):
zip_file = zipfile.ZipFile(filename, "w")
try:
self.mkpath(self.target_dir) # just in case
for root, dirs, files in os.walk(self.target_dir):
if root == self.target_dir and not files:
tmpl = "no files found in upload directory '%s'"
raise DistutilsOptionError(tmpl % self.target_dir)
for name in files:
full = os.path.join(root, name)
relative = root[len(self.target_dir):].lstrip(os.path.sep)
dest = os.path.join(relative, name)
zip_file.write(full, dest)
finally:
zip_file.close()
def run(self):
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
tmp_dir = tempfile.mkdtemp()
name = self.distribution.metadata.get_name()
zip_file = os.path.join(tmp_dir, "%s.zip" % name)
try:
self.create_zipfile(zip_file)
self.upload_file(zip_file)
finally:
shutil.rmtree(tmp_dir)
@staticmethod
def _build_part(item, sep_boundary):
key, values = item
title = '\nContent-Disposition: form-data; name="%s"' % key
# handle multiple entries for the same name
if not isinstance(values, list):
values = [values]
for value in values:
if isinstance(value, tuple):
title += '; filename="%s"' % value[0]
value = value[1]
else:
value = _encode(value)
yield sep_boundary
yield _encode(title)
yield b"\n\n"
yield value
if value and value[-1:] == b'\r':
yield b'\n' # write an extra newline (lurve Macs)
@classmethod
def _build_multipart(cls, data):
"""
Build up the MIME payload for the POST data
"""
boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
sep_boundary = b'\n--' + boundary.encode('ascii')
end_boundary = sep_boundary + b'--'
end_items = end_boundary, b"\n",
builder = functools.partial(
cls._build_part,
sep_boundary=sep_boundary,
)
part_groups = map(builder, data.items())
parts = itertools.chain.from_iterable(part_groups)
body_items = itertools.chain(parts, end_items)
content_type = 'multipart/form-data; boundary=%s' % boundary
return b''.join(body_items), content_type
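# Hedged example (illustrative data): _build_multipart({'name': 'pkg'})
# returns a bytes body containing one 'Content-Disposition: form-data;
# name="name"' part plus the closing boundary, and a content type of the
# form 'multipart/form-data; boundary=...'.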
def upload_file(self, filename):
with open(filename, 'rb') as f:
content = f.read()
meta = self.distribution.metadata
data = {
':action': 'doc_upload',
'name': meta.get_name(),
'content': (os.path.basename(filename), content),
}
# set up the authentication
credentials = _encode(self.username + ':' + self.password)
credentials = standard_b64encode(credentials)
if not six.PY2:
credentials = credentials.decode('ascii')
auth = "Basic " + credentials
body, ct = self._build_multipart(data)
msg = "Submitting documentation to %s" % (self.repository)
self.announce(msg, log.INFO)
# build the Request
# We can't use urllib2 since we need to send the Basic
# auth right with the first request
schema, netloc, url, params, query, fragments = \
urllib.parse.urlparse(self.repository)
assert not params and not query and not fragments
if schema == 'http':
conn = http_client.HTTPConnection(netloc)
elif schema == 'https':
conn = http_client.HTTPSConnection(netloc)
else:
raise AssertionError("unsupported schema " + schema)
data = ''
try:
conn.connect()
conn.putrequest("POST", url)
content_type = ct
conn.putheader('Content-type', content_type)
conn.putheader('Content-length', str(len(body)))
conn.putheader('Authorization', auth)
conn.endheaders()
conn.send(body)
except socket.error as e:
self.announce(str(e), log.ERROR)
return
r = conn.getresponse()
if r.status == 200:
msg = 'Server response (%s): %s' % (r.status, r.reason)
self.announce(msg, log.INFO)
elif r.status == 301:
location = r.getheader('Location')
if location is None:
location = 'https://pythonhosted.org/%s/' % meta.get_name()
msg = 'Upload successful. Visit %s' % location
self.announce(msg, log.INFO)
else:
msg = 'Upload failed (%s): %s' % (r.status, r.reason)
self.announce(msg, log.ERROR)
if self.show_response:
print('-' * 75, r.read(), '-' * 75)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/build_py.py
|
from glob import glob
from distutils.util import convert_path
import distutils.command.build_py as orig
import os
import fnmatch
import textwrap
import io
import distutils.errors
import itertools
import stat
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter, filterfalse
try:
from setuptools.lib2to3_ex import Mixin2to3
except ImportError:
class Mixin2to3:
def run_2to3(self, files, doctests=True):
"do nothing"
def make_writable(target):
os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
class build_py(orig.build_py, Mixin2to3):
"""Enhanced 'build_py' command that includes data files with packages
The data files are specified via a 'package_data' argument to 'setup()'.
See 'setuptools.dist.Distribution' for more details.
Also, this version of the 'build_py' command allows you to specify both
'py_modules' and 'packages' in the same setup operation.
"""
def finalize_options(self):
orig.build_py.finalize_options(self)
self.package_data = self.distribution.package_data
self.exclude_package_data = (self.distribution.exclude_package_data or
{})
if 'data_files' in self.__dict__:
del self.__dict__['data_files']
self.__updated_files = []
self.__doctests_2to3 = []
def run(self):
"""Build modules, packages, and copy data files to build directory"""
if not self.py_modules and not self.packages:
return
if self.py_modules:
self.build_modules()
if self.packages:
self.build_packages()
self.build_package_data()
self.run_2to3(self.__updated_files, False)
self.run_2to3(self.__updated_files, True)
self.run_2to3(self.__doctests_2to3, True)
# Only compile actual .py files, using our base class' idea of what our
# output files are.
self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
def __getattr__(self, attr):
"lazily compute data files"
if attr == 'data_files':
self.data_files = self._get_data_files()
return self.data_files
return orig.build_py.__getattr__(self, attr)
def build_module(self, module, module_file, package):
if six.PY2 and isinstance(package, six.string_types):
# avoid errors on Python 2 when unicode is passed (#190)
package = package.split('.')
outfile, copied = orig.build_py.build_module(self, module, module_file,
package)
if copied:
self.__updated_files.append(outfile)
return outfile, copied
def _get_data_files(self):
"""Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
self.analyze_manifest()
return list(map(self._get_pkg_data_files, self.packages or ()))
def _get_pkg_data_files(self, package):
# Locate package source directory
src_dir = self.get_package_dir(package)
# Compute package build directory
build_dir = os.path.join(*([self.build_lib] + package.split('.')))
# Strip directory from globbed filenames
filenames = [
os.path.relpath(file, src_dir)
for file in self.find_data_files(package, src_dir)
]
return package, src_dir, build_dir, filenames
def find_data_files(self, package, src_dir):
"""Return filenames for package's data files in 'src_dir'"""
patterns = self._get_platform_patterns(
self.package_data,
package,
src_dir,
)
globs_expanded = map(glob, patterns)
# flatten the expanded globs into an iterable of matches
globs_matches = itertools.chain.from_iterable(globs_expanded)
glob_files = filter(os.path.isfile, globs_matches)
files = itertools.chain(
self.manifest_files.get(package, []),
glob_files,
)
return self.exclude_data_files(package, src_dir, files)
def build_package_data(self):
"""Copy data files into build directory"""
for package, src_dir, build_dir, filenames in self.data_files:
for filename in filenames:
target = os.path.join(build_dir, filename)
self.mkpath(os.path.dirname(target))
srcfile = os.path.join(src_dir, filename)
outf, copied = self.copy_file(srcfile, target)
make_writable(target)
srcfile = os.path.abspath(srcfile)
if (copied and
srcfile in self.distribution.convert_2to3_doctests):
self.__doctests_2to3.append(outf)
def analyze_manifest(self):
self.manifest_files = mf = {}
if not self.distribution.include_package_data:
return
src_dirs = {}
for package in self.packages or ():
# Locate package source directory
src_dirs[assert_relative(self.get_package_dir(package))] = package
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
for path in ei_cmd.filelist.files:
d, f = os.path.split(assert_relative(path))
prev = None
oldf = f
while d and d != prev and d not in src_dirs:
prev = d
d, df = os.path.split(d)
f = os.path.join(df, f)
if d in src_dirs:
if path.endswith('.py') and f == oldf:
continue # it's a module, not data
mf.setdefault(src_dirs[d], []).append(path)
def get_data_files(self):
pass # Lazily compute data files in _get_data_files() function.
def check_package(self, package, package_dir):
"""Check namespace packages' __init__ for declare_namespace"""
try:
return self.packages_checked[package]
except KeyError:
pass
init_py = orig.build_py.check_package(self, package, package_dir)
self.packages_checked[package] = init_py
if not init_py or not self.distribution.namespace_packages:
return init_py
for pkg in self.distribution.namespace_packages:
if pkg == package or pkg.startswith(package + '.'):
break
else:
return init_py
with io.open(init_py, 'rb') as f:
contents = f.read()
if b'declare_namespace' not in contents:
raise distutils.errors.DistutilsError(
"Namespace package problem: %s is a namespace package, but "
"its\n__init__.py does not call declare_namespace()! Please "
'fix it.\n(See the setuptools manual under '
'"Namespace Packages" for details.)\n"' % (package,)
)
return init_py
def initialize_options(self):
self.packages_checked = {}
orig.build_py.initialize_options(self)
def get_package_dir(self, package):
res = orig.build_py.get_package_dir(self, package)
if self.distribution.src_root is not None:
return os.path.join(self.distribution.src_root, res)
return res
def exclude_data_files(self, package, src_dir, files):
"""Filter filenames for package's data files in 'src_dir'"""
files = list(files)
patterns = self._get_platform_patterns(
self.exclude_package_data,
package,
src_dir,
)
match_groups = (
fnmatch.filter(files, pattern)
for pattern in patterns
)
# flatten the groups of matches into an iterable of matches
matches = itertools.chain.from_iterable(match_groups)
bad = set(matches)
keepers = (
fn
for fn in files
if fn not in bad
)
# ditch dupes
return list(_unique_everseen(keepers))
@staticmethod
def _get_platform_patterns(spec, package, src_dir):
"""
yield platform-specific path patterns (suitable for glob
or fnmatch) from a glob-based spec (such as
self.package_data or self.exclude_package_data)
matching package in src_dir.
"""
raw_patterns = itertools.chain(
spec.get('', []),
spec.get(package, []),
)
return (
# Each pattern has to be converted to a platform-specific path
os.path.join(src_dir, convert_path(pattern))
for pattern in raw_patterns
)
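# Hedged example: with spec={'': ['*.txt'], 'pkg': ['data/*.json']},
# _get_platform_patterns(spec, 'pkg', 'src/pkg') yields the joined
# patterns 'src/pkg/*.txt' and 'src/pkg/data/*.json' (platform-specific
# separators apply via convert_path).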
# from Python docs
def _unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def assert_relative(path):
if not os.path.isabs(path):
return path
from distutils.errors import DistutilsSetupError
msg = textwrap.dedent("""
Error: setup script specifies an absolute path:
%s
setup() arguments must *always* be /-separated paths relative to the
setup.py directory, *never* absolute paths.
""").lstrip() % path
raise DistutilsSetupError(msg)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/__init__.py
|
__all__ = [
'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
'bdist_wininst', 'upload_docs', 'build_clib', 'dist_info',
]
from distutils.command.bdist import bdist
import sys
from setuptools.command import install_scripts
if 'egg' not in bdist.format_commands:
bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
bdist.format_commands.append('egg')
del bdist, sys
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/sdist.py
|
from distutils import log
import distutils.command.sdist as orig
import os
import sys
import io
import contextlib
from setuptools.extern import six, ordered_set
from .py36compat import sdist_add_defaults
import pkg_resources
_default_revctrl = list
def walk_revctrl(dirname=''):
"""Find all files under revision control"""
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
for item in ep.load()(dirname):
yield item
class sdist(sdist_add_defaults, orig.sdist):
"""Smart sdist that finds anything supported by revision control"""
user_options = [
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
]
negative_opt = {}
README_EXTENSIONS = ['', '.rst', '.txt', '.md']
READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
def run(self):
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
self.filelist = ei_cmd.filelist
self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
self.check_readme()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
self.make_distribution()
dist_files = getattr(self.distribution, 'dist_files', [])
for file in self.archive_files:
data = ('sdist', '', file)
if data not in dist_files:
dist_files.append(data)
def initialize_options(self):
orig.sdist.initialize_options(self)
self._default_to_gztar()
def _default_to_gztar(self):
# only needed on Python prior to 3.6.
if sys.version_info >= (3, 6, 0, 'beta', 1):
return
self.formats = ['gztar']
def make_distribution(self):
"""
Workaround for #516
"""
with self._remove_os_link():
orig.sdist.make_distribution(self)
@staticmethod
@contextlib.contextmanager
def _remove_os_link():
"""
In a context, remove and restore os.link if it exists
"""
class NoValue:
pass
orig_val = getattr(os, 'link', NoValue)
try:
del os.link
except Exception:
pass
try:
yield
finally:
if orig_val is not NoValue:
setattr(os, 'link', orig_val)
def __read_template_hack(self):
# This grody hack closes the template file (MANIFEST.in) if an
# exception occurs during read_template.
# Doing so prevents an error when easy_install attempts to delete the
# file.
try:
orig.sdist.read_template(self)
except Exception:
_, _, tb = sys.exc_info()
tb.tb_next.tb_frame.f_locals['template'].close()
raise
# Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
# has been fixed, so only override the method if we're using an earlier
# Python.
has_leaky_handle = (
sys.version_info < (2, 7, 2)
or (3, 0) <= sys.version_info < (3, 1, 4)
or (3, 2) <= sys.version_info < (3, 2, 1)
)
if has_leaky_handle:
read_template = __read_template_hack
def _add_defaults_optional(self):
if six.PY2:
sdist_add_defaults._add_defaults_optional(self)
else:
super()._add_defaults_optional()
if os.path.isfile('pyproject.toml'):
self.filelist.append('pyproject.toml')
def _add_defaults_python(self):
"""getting python files"""
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
self.filelist.extend(build_py.get_source_files())
self._add_data_files(self._safe_data_files(build_py))
def _safe_data_files(self, build_py):
"""
Extracting data_files from build_py is known to cause
infinite recursion errors when `include_package_data`
is enabled, so suppress it in that case.
"""
if self.distribution.include_package_data:
return ()
return build_py.data_files
def _add_data_files(self, data_files):
"""
Add data files as found in build_py.data_files.
"""
self.filelist.extend(
os.path.join(src_dir, name)
for _, src_dir, _, filenames in data_files
for name in filenames
)
def _add_defaults_data_files(self):
try:
if six.PY2:
sdist_add_defaults._add_defaults_data_files(self)
else:
super()._add_defaults_data_files()
except TypeError:
log.warn("data_files contains unexpected objects")
def check_readme(self):
for f in self.READMES:
if os.path.exists(f):
return
else:
self.warn(
"standard file not found: should have one of " +
', '.join(self.READMES)
)
def make_release_tree(self, base_dir, files):
orig.sdist.make_release_tree(self, base_dir, files)
# Save any egg_info command line options used to create this sdist
dest = os.path.join(base_dir, 'setup.cfg')
if hasattr(os, 'link') and os.path.exists(dest):
# unlink and re-copy, since it might be hard-linked, and
# we don't want to change the source version
os.unlink(dest)
self.copy_file('setup.cfg', dest)
self.get_finalized_command('egg_info').save_version_info(dest)
def _manifest_is_not_generated(self):
# check for special comment used in 2.7.1 and higher
if not os.path.isfile(self.manifest):
return False
with io.open(self.manifest, 'rb') as fp:
first_line = fp.readline()
return (first_line !=
'# file GENERATED by distutils, do NOT edit\n'.encode())
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest, 'rb')
for line in manifest:
# The manifest must contain UTF-8. See #303.
if not six.PY2:
try:
line = line.decode('UTF-8')
except UnicodeDecodeError:
log.warn("%r not UTF-8 decodable -- skipping" % line)
continue
# ignore comments and blank lines
line = line.strip()
if line.startswith('#') or not line:
continue
self.filelist.append(line)
manifest.close()
def check_license(self):
"""Checks if license_file' or 'license_files' is configured and adds any
valid paths to 'self.filelist'.
"""
files = ordered_set.OrderedSet()
opts = self.distribution.get_option_dict('metadata')
# ignore the source of the value
_, license_file = opts.get('license_file', (None, None))
if license_file is None:
log.debug("'license_file' option was not specified")
else:
files.add(license_file)
try:
files.update(self.distribution.metadata.license_files)
except TypeError:
log.warn("warning: 'license_files' option is malformed")
for f in files:
if not os.path.exists(f):
log.warn(
"warning: Failed to find the configured license file '%s'",
f)
files.remove(f)
self.filelist.extend(files)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/test.py
|
import os
import operator
import sys
import contextlib
import itertools
import unittest
from distutils.errors import DistutilsError, DistutilsOptionError
from distutils import log
from unittest import TestLoader
from setuptools.extern import six
from setuptools.extern.six.moves import map, filter
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages, evaluate_marker,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from .build_py import _unique_everseen
__metaclass__ = type
class ScanningLoader(TestLoader):
def __init__(self):
TestLoader.__init__(self)
self._visited = set()
def loadTestsFromModule(self, module, pattern=None):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
if module in self._visited:
return None
self._visited.add(module)
tests = []
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__ + '.' + file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
# adapted from jaraco.classes.properties:NonDataProperty
class NonDataProperty:
def __init__(self, fget):
self.fget = fget
def __get__(self, obj, objtype=None):
if obj is None:
return self
return self.fget(obj)
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build (deprecated)"
user_options = [
('test-module=', 'm', "Run 'test_suite' in specified module"),
('test-suite=', 's',
"Run single test, case or suite (e.g. 'module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite and self.test_module:
msg = "You may specify a module or a suite, but not both"
raise DistutilsOptionError(msg)
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
@NonDataProperty
def test_args(self):
return list(self._test_args())
def _test_args(self):
if not self.test_suite and sys.version_info >= (2, 7):
yield 'discover'
if self.verbose:
yield '--verbose'
if self.test_suite:
yield self.test_suite
def with_project_on_sys_path(self, func):
"""
Backward compatibility for project_on_sys_path context.
"""
with self.project_on_sys_path():
func()
@contextlib.contextmanager
def project_on_sys_path(self, include_dists=[]):
with_2to3 = not six.PY2 and getattr(
self.distribution, 'use_2to3', False)
if with_2to3:
# If we run 2to3 we cannot do this in place:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
project_path = normalize_path(ei_cmd.egg_base)
sys.path.insert(0, project_path)
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
with self.paths_on_pythonpath([project_path]):
yield
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
@staticmethod
@contextlib.contextmanager
def paths_on_pythonpath(paths):
"""
Add the indicated paths to the head of the PYTHONPATH environment
variable so that subprocesses will also see the packages at
these paths.
Do this in a context that restores the value on exit.
"""
nothing = object()
orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
current_pythonpath = os.environ.get('PYTHONPATH', '')
try:
prefix = os.pathsep.join(_unique_everseen(paths))
to_join = filter(None, [prefix, current_pythonpath])
new_path = os.pathsep.join(to_join)
if new_path:
os.environ['PYTHONPATH'] = new_path
yield
finally:
if orig_pythonpath is nothing:
os.environ.pop('PYTHONPATH', None)
else:
os.environ['PYTHONPATH'] = orig_pythonpath
@staticmethod
def install_dists(dist):
"""
Install the requirements indicated by self.distribution and
return an iterable of the dists that were built.
"""
ir_d = dist.fetch_build_eggs(dist.install_requires)
tr_d = dist.fetch_build_eggs(dist.tests_require or [])
er_d = dist.fetch_build_eggs(
v for k, v in dist.extras_require.items()
if k.startswith(':') and evaluate_marker(k[1:])
)
return itertools.chain(ir_d, tr_d, er_d)
def run(self):
self.announce(
"WARNING: Testing via this command is deprecated and will be "
"removed in a future version. Users looking for a generic test "
"entry point independent of test runner are encouraged to use "
"tox.",
log.WARN,
)
installed_dists = self.install_dists(self.distribution)
cmd = ' '.join(self._argv)
if self.dry_run:
self.announce('skipping "%s" (dry run)' % cmd)
return
self.announce('running "%s"' % cmd)
paths = map(operator.attrgetter('location'), installed_dists)
with self.paths_on_pythonpath(paths):
with self.project_on_sys_path():
self.run_tests()
def run_tests(self):
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if not six.PY2 and getattr(self.distribution, 'use_2to3', False):
module = self.test_suite.split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
test = unittest.main(
None, None, self._argv,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
exit=False,
)
if not test.result.wasSuccessful():
msg = 'Test failed: %s' % test.result
self.announce(msg, log.ERROR)
raise DistutilsError(msg)
@property
def _argv(self):
return ['unittest'] + self.test_args
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.resolve()()
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/saveopts.py
|
from setuptools.command.setopt import edit_config, option_base
class saveopts(option_base):
"""Save command-line options to a file"""
description = "save supplied options to setup.cfg or other config file"
def run(self):
dist = self.distribution
settings = {}
for cmd in dist.command_options:
if cmd == 'saveopts':
continue # don't save our own options!
for opt, (src, val) in dist.get_option_dict(cmd).items():
if src == "command line":
settings.setdefault(cmd, {})[opt] = val
edit_config(self.filename, settings, self.dry_run)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/bdist_rpm.py
|
import distutils.command.bdist_rpm as orig
class bdist_rpm(orig.bdist_rpm):
"""
Override the default bdist_rpm behavior to do the following:
1. Run egg_info to ensure the name and version are properly calculated.
2. Always run 'install' using --single-version-externally-managed to
disable eggs in RPM distributions.
3. Replace dash with underscore in the version numbers for better RPM
compatibility.
"""
def run(self):
# ensure distro name is up-to-date
self.run_command('egg_info')
orig.bdist_rpm.run(self)
def _make_spec_file(self):
version = self.distribution.get_version()
rpmversion = version.replace('-', '_')
spec = orig.bdist_rpm._make_spec_file(self)
line23 = '%define version ' + version
line24 = '%define version ' + rpmversion
spec = [
line.replace(
"Source0: %{name}-%{version}.tar",
"Source0: %{name}-%{unmangled_version}.tar"
).replace(
"setup.py install ",
"setup.py install --single-version-externally-managed "
).replace(
"%setup",
"%setup -n %{name}-%{unmangled_version}"
).replace(line23, line24)
for line in spec
]
insert_loc = spec.index(line24) + 1
unmangled_version = "%define unmangled_version " + version
spec.insert(insert_loc, unmangled_version)
return spec
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/build_clib.py
|
import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
"""
Override the default build_clib behaviour to do the following:
1. Implement a rudimentary timestamp-based dependency system
so 'compile()' doesn't run every time.
2. Add more keys to the 'build_info' dictionary:
* obj_deps - specify dependencies for each object compiled.
this should be a dictionary mapping a key
with the source filename to a list of
dependencies. Use an empty string for global
dependencies.
* cflags - specify a list of additional flags to pass to
the compiler.
"""
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# Make sure everything is the correct type.
# obj_deps should be a dictionary of keys as sources
# and a list/tuple of files that are its dependencies.
obj_deps = build_info.get('obj_deps', dict())
if not isinstance(obj_deps, dict):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
dependencies = []
# Get the global dependencies that are specified by the '' key.
# These will go into every source's dependency list.
global_deps = obj_deps.get('', list())
if not isinstance(global_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
# Build the list to be used by newer_pairwise_group
# each source will be auto-added to its dependencies.
for source in sources:
src_deps = [source]
src_deps.extend(global_deps)
extra_deps = obj_deps.get(source, list())
if not isinstance(extra_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
src_deps.extend(extra_deps)
dependencies.append(src_deps)
expected_objects = self.compiler.object_filenames(
sources,
output_dir=self.build_temp,
)
if (
newer_pairwise_group(dependencies, expected_objects)
!= ([], [])
):
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
cflags = build_info.get('cflags')
self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
extra_postargs=cflags,
debug=self.debug
)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(
expected_objects,
lib_name,
output_dir=self.build_clib,
debug=self.debug
)
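# Hedged sketch of a 'libraries' entry using the extra keys documented
# above; every name and path here is illustrative only.
#
#   libraries=[('foo', {
#       'sources': ['src/foo.c'],
#       'obj_deps': {'': ['src/common.h'],         # global dependencies
#                    'src/foo.c': ['src/foo.h']},  # per-source dependencies
#       'cflags': ['-O2'],
#   })]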
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/egg_info.py
|
"""setuptools.command.egg_info
Create a distribution's .egg-info directory and contents"""
from distutils.filelist import FileList as _FileList
from distutils.errors import DistutilsInternalError
from distutils.util import convert_path
from distutils import log
import distutils.errors
import distutils.filelist
import os
import re
import sys
import io
import warnings
import time
import collections
from setuptools.extern import six
from setuptools.extern.six.moves import map
from setuptools import Command
from setuptools.command.sdist import sdist
from setuptools.command.sdist import walk_revctrl
from setuptools.command.setopt import edit_config
from setuptools.command import bdist_egg
from pkg_resources import (
parse_requirements, safe_name, parse_version,
safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
import setuptools.unicode_utils as unicode_utils
from setuptools.glob import glob
from setuptools.extern import packaging
from setuptools import SetuptoolsDeprecationWarning
def translate_pattern(glob):
"""
Translate a file path glob like '*.txt' into a regular expression.
This differs from fnmatch.translate which allows wildcards to match
directory separators. It also knows about '**/' which matches any number of
directories.
"""
pat = ''
# This will split on '/' within [character classes]. This is deliberate.
chunks = glob.split(os.path.sep)
sep = re.escape(os.sep)
valid_char = '[^%s]' % (sep,)
for c, chunk in enumerate(chunks):
last_chunk = c == len(chunks) - 1
# Chunks that are a literal ** are globstars. They match anything.
if chunk == '**':
if last_chunk:
# Match anything if this is the last component
pat += '.*'
else:
# Match '(name/)*'
pat += '(?:%s+%s)*' % (valid_char, sep)
continue # Skip ahead; the whole path component has been handled
# Find any special characters in the remainder
i = 0
chunk_len = len(chunk)
while i < chunk_len:
char = chunk[i]
if char == '*':
# Match any number of name characters
pat += valid_char + '*'
elif char == '?':
# Match a name character
pat += valid_char
elif char == '[':
# Character class
inner_i = i + 1
# Skip initial !/] chars
if inner_i < chunk_len and chunk[inner_i] == '!':
inner_i = inner_i + 1
if inner_i < chunk_len and chunk[inner_i] == ']':
inner_i = inner_i + 1
# Loop till the closing ] is found
while inner_i < chunk_len and chunk[inner_i] != ']':
inner_i = inner_i + 1
if inner_i >= chunk_len:
# Got to the end of the string without finding a closing ]
# Do not treat this as a matching group, but as a literal [
pat += re.escape(char)
else:
# Grab the insides of the [brackets]
inner = chunk[i + 1:inner_i]
char_class = ''
# Class negation
if inner[0] == '!':
char_class = '^'
inner = inner[1:]
char_class += re.escape(inner)
pat += '[%s]' % (char_class,)
# Skip to the end ]
i = inner_i
else:
pat += re.escape(char)
i += 1
# Join each chunk with the dir separator
if not last_chunk:
pat += sep
pat += r'\Z'
return re.compile(pat, flags=re.MULTILINE | re.DOTALL)
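# Doctest-style sketch of translate_pattern (illustrative values, assuming a
# POSIX path separator):
#
#     >>> pat = translate_pattern('docs/**/*.txt')
#     >>> bool(pat.match('docs/api/index.txt'))
#     True
#     >>> bool(pat.match('src/index.txt'))
#     False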
class InfoCommon:
tag_build = None
tag_date = None
@property
def name(self):
return safe_name(self.distribution.get_name())
def tagged_version(self):
version = self.distribution.get_version()
# egg_info may be called more than once for a distribution,
# in which case the version string already contains all tags.
if self.vtags and version.endswith(self.vtags):
return safe_version(version)
return safe_version(version + self.vtags)
def tags(self):
version = ''
if self.tag_build:
version += self.tag_build
if self.tag_date:
version += time.strftime("-%Y%m%d")
return version
vtags = property(tags)
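# Illustrative only: with tag_build = '.dev' and tag_date set, tags() returns
# something like '.dev-20190528', so tagged_version() turns a base version
# '1.0' into '1.0.dev-20190528' (before safe_version normalization).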
class egg_info(InfoCommon, Command):
description = "create a distribution's .egg-info directory"
user_options = [
('egg-base=', 'e', "directory containing .egg-info directories"
" (default: top of the source tree)"),
('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
('tag-build=', 'b', "Specify explicit tag to add to version number"),
('no-date', 'D', "Don't include date stamp [default]"),
]
boolean_options = ['tag-date']
negative_opt = {
'no-date': 'tag-date',
}
def initialize_options(self):
self.egg_base = None
self.egg_name = None
self.egg_info = None
self.egg_version = None
self.broken_egg_info = False
####################################
# allow the 'tag_svn_revision' to be detected and
# set, supporting sdists built on older Setuptools.
@property
def tag_svn_revision(self):
pass
@tag_svn_revision.setter
def tag_svn_revision(self, value):
pass
####################################
def save_version_info(self, filename):
"""
Materialize the value of date into the
build tag. Install build keys in a deterministic order
to avoid arbitrary reordering on subsequent builds.
"""
egg_info = collections.OrderedDict()
# follow the order these keys would have been added
# when PYTHONHASHSEED=0
egg_info['tag_build'] = self.tags()
egg_info['tag_date'] = 0
edit_config(filename, dict(egg_info=egg_info))
def finalize_options(self):
# Note: we need to capture the current value returned
# by `self.tagged_version()`, so we can later update
# `self.distribution.metadata.version` without
# repercussions.
self.egg_name = self.name
self.egg_version = self.tagged_version()
parsed_version = parse_version(self.egg_version)
try:
is_version = isinstance(parsed_version, packaging.version.Version)
spec = (
"%s==%s" if is_version else "%s===%s"
)
list(
parse_requirements(spec % (self.egg_name, self.egg_version))
)
except ValueError as e:
raise distutils.errors.DistutilsOptionError(
"Invalid distribution name or version syntax: %s-%s" %
(self.egg_name, self.egg_version)
) from e
if self.egg_base is None:
dirs = self.distribution.package_dir
self.egg_base = (dirs or {}).get('', os.curdir)
self.ensure_dirname('egg_base')
self.egg_info = to_filename(self.egg_name) + '.egg-info'
if self.egg_base != os.curdir:
self.egg_info = os.path.join(self.egg_base, self.egg_info)
if '-' in self.egg_name:
self.check_broken_egg_info()
# Set package version for the benefit of dumber commands
# (e.g. sdist, bdist_wininst, etc.)
#
self.distribution.metadata.version = self.egg_version
# If we bootstrapped around the lack of a PKG-INFO, as might be the
# case in a fresh checkout, make sure that any special tags get added
# to the version info
#
pd = self.distribution._patched_dist
if pd is not None and pd.key == self.egg_name.lower():
pd._version = self.egg_version
pd._parsed_version = parse_version(self.egg_version)
self.distribution._patched_dist = None
def write_or_delete_file(self, what, filename, data, force=False):
"""Write `data` to `filename` or delete if empty
If `data` is non-empty, this routine is the same as ``write_file()``.
If `data` is empty but not ``None``, this is the same as calling
``delete_file(filename)``. If `data` is ``None``, then this is a no-op
unless `filename` exists, in which case a warning is issued about the
orphaned file (if `force` is false), or deleted (if `force` is true).
"""
if data:
self.write_file(what, filename, data)
elif os.path.exists(filename):
if data is None and not force:
log.warn(
"%s not set in setup(), but %s exists", what, filename
)
return
else:
self.delete_file(filename)
def write_file(self, what, filename, data):
"""Write `data` to `filename` (if not a dry run) after announcing it
`what` is used in a log message to identify what is being written
to the file.
"""
log.info("writing %s to %s", what, filename)
if not six.PY2:
data = data.encode("utf-8")
if not self.dry_run:
f = open(filename, 'wb')
f.write(data)
f.close()
def delete_file(self, filename):
"""Delete `filename` (if not a dry run) after announcing it"""
log.info("deleting %s", filename)
if not self.dry_run:
os.unlink(filename)
def run(self):
self.mkpath(self.egg_info)
os.utime(self.egg_info, None)
installer = self.distribution.fetch_build_egg
for ep in iter_entry_points('egg_info.writers'):
ep.require(installer=installer)
writer = ep.resolve()
writer(self, ep.name, os.path.join(self.egg_info, ep.name))
# Get rid of native_libs.txt if it was put there by older bdist_egg
nl = os.path.join(self.egg_info, "native_libs.txt")
if os.path.exists(nl):
self.delete_file(nl)
self.find_sources()
def find_sources(self):
"""Generate SOURCES.txt manifest file"""
manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
mm = manifest_maker(self.distribution)
mm.manifest = manifest_filename
mm.run()
self.filelist = mm.filelist
def check_broken_egg_info(self):
bei = self.egg_name + '.egg-info'
if self.egg_base != os.curdir:
bei = os.path.join(self.egg_base, bei)
if os.path.exists(bei):
log.warn(
"-" * 78 + '\n'
"Note: Your current .egg-info directory has a '-' in its name;"
'\nthis will not work correctly with "setup.py develop".\n\n'
'Please rename %s to %s to correct this problem.\n' + '-' * 78,
bei, self.egg_info
)
self.broken_egg_info = self.egg_info
self.egg_info = bei # make it work for now
class FileList(_FileList):
# Implementations of the various MANIFEST.in commands
def process_template_line(self, line):
# Parse the line: split it up, make sure the right number of words
# is there, and return the relevant words. 'action' is always
# defined: it's the first word of the line. Which of the other
# three are defined depends on the action; it'll be either
# patterns, (dir and patterns), or (dir_pattern).
(action, patterns, dir, dir_pattern) = self._parse_template_line(line)
# OK, now we know that the action is valid and we have the
# right number of words on the line for that action -- so we
# can proceed with minimal error-checking.
if action == 'include':
self.debug_print("include " + ' '.join(patterns))
for pattern in patterns:
if not self.include(pattern):
log.warn("warning: no files found matching '%s'", pattern)
elif action == 'exclude':
self.debug_print("exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.exclude(pattern):
log.warn(("warning: no previously-included files "
"found matching '%s'"), pattern)
elif action == 'global-include':
self.debug_print("global-include " + ' '.join(patterns))
for pattern in patterns:
if not self.global_include(pattern):
log.warn(("warning: no files found matching '%s' "
"anywhere in distribution"), pattern)
elif action == 'global-exclude':
self.debug_print("global-exclude " + ' '.join(patterns))
for pattern in patterns:
if not self.global_exclude(pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found anywhere in distribution"),
pattern)
elif action == 'recursive-include':
self.debug_print("recursive-include %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_include(dir, pattern):
log.warn(("warning: no files found matching '%s' "
"under directory '%s'"),
pattern, dir)
elif action == 'recursive-exclude':
self.debug_print("recursive-exclude %s %s" %
(dir, ' '.join(patterns)))
for pattern in patterns:
if not self.recursive_exclude(dir, pattern):
log.warn(("warning: no previously-included files matching "
"'%s' found under directory '%s'"),
pattern, dir)
elif action == 'graft':
self.debug_print("graft " + dir_pattern)
if not self.graft(dir_pattern):
log.warn("warning: no directories found matching '%s'",
dir_pattern)
elif action == 'prune':
self.debug_print("prune " + dir_pattern)
if not self.prune(dir_pattern):
log.warn(("no previously-included directories found "
"matching '%s'"), dir_pattern)
else:
raise DistutilsInternalError(
"this cannot happen: invalid action '%s'" % action)
def _remove_files(self, predicate):
"""
Remove all files from the file list that match the predicate.
Return True if any matching files were removed
"""
found = False
for i in range(len(self.files) - 1, -1, -1):
if predicate(self.files[i]):
self.debug_print(" removing " + self.files[i])
del self.files[i]
found = True
return found
def include(self, pattern):
"""Include files that match 'pattern'."""
found = [f for f in glob(pattern) if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def exclude(self, pattern):
"""Exclude files that match 'pattern'."""
match = translate_pattern(pattern)
return self._remove_files(match.match)
def recursive_include(self, dir, pattern):
"""
Include all files anywhere in 'dir/' that match the pattern.
"""
full_pattern = os.path.join(dir, '**', pattern)
found = [f for f in glob(full_pattern, recursive=True)
if not os.path.isdir(f)]
self.extend(found)
return bool(found)
def recursive_exclude(self, dir, pattern):
"""
Exclude any file anywhere in 'dir/' that matches the pattern.
"""
match = translate_pattern(os.path.join(dir, '**', pattern))
return self._remove_files(match.match)
def graft(self, dir):
"""Include all files from 'dir/'."""
found = [
item
for match_dir in glob(dir)
for item in distutils.filelist.findall(match_dir)
]
self.extend(found)
return bool(found)
def prune(self, dir):
"""Filter out files from 'dir/'."""
match = translate_pattern(os.path.join(dir, '**'))
return self._remove_files(match.match)
def global_include(self, pattern):
"""
Include all files anywhere in the current directory that match the
pattern. This is very inefficient on large file trees.
"""
if self.allfiles is None:
self.findall()
match = translate_pattern(os.path.join('**', pattern))
found = [f for f in self.allfiles if match.match(f)]
self.extend(found)
return bool(found)
def global_exclude(self, pattern):
"""
Exclude all files anywhere that match the pattern.
"""
match = translate_pattern(os.path.join('**', pattern))
return self._remove_files(match.match)
def append(self, item):
if item.endswith('\r'): # Fix older sdists built on Windows
item = item[:-1]
path = convert_path(item)
if self._safe_path(path):
self.files.append(path)
def extend(self, paths):
self.files.extend(filter(self._safe_path, paths))
def _repair(self):
"""
Replace self.files with only safe paths
Because some owners of FileList manipulate the underlying
``files`` attribute directly, this method must be called to
repair those paths.
"""
self.files = list(filter(self._safe_path, self.files))
def _safe_path(self, path):
enc_warn = "'%s' not %s encodable -- skipping"
# To avoid accidental transcoding errors, decode to unicode first
u_path = unicode_utils.filesys_decode(path)
if u_path is None:
log.warn("'%s' in unexpected encoding -- skipping" % path)
return False
# Must ensure utf-8 encodability
utf8_path = unicode_utils.try_encode(u_path, "utf-8")
if utf8_path is None:
log.warn(enc_warn, path, 'utf-8')
return False
try:
# accept if either way checks out
if os.path.exists(u_path) or os.path.exists(utf8_path):
return True
# this catches any UnicodeEncodeError raised while encoding u_path
except UnicodeEncodeError:
log.warn(enc_warn, path, sys.getfilesystemencoding())
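# Sketch of the guarantees above (hypothetical paths): extend() silently drops
# anything _safe_path rejects, so a FileList fed a mix of paths keeps only
# those that exist and survive the encoding round-trips:
#
#     fl = FileList()
#     fl.extend(['setup.py', 'no-such-file.txt'])  # only 'setup.py' is kept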
class manifest_maker(sdist):
template = "MANIFEST.in"
def initialize_options(self):
self.use_defaults = 1
self.prune = 1
self.manifest_only = 1
self.force_manifest = 1
def finalize_options(self):
pass
def run(self):
self.filelist = FileList()
if not os.path.exists(self.manifest):
self.write_manifest() # it must exist so it'll get in the list
self.add_defaults()
if os.path.exists(self.template):
self.read_template()
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def _manifest_normalize(self, path):
path = unicode_utils.filesys_decode(path)
return path.replace(os.sep, '/')
def write_manifest(self):
"""
Write the file list in 'self.filelist' to the manifest file
named by 'self.manifest'.
"""
self.filelist._repair()
# _repair has ensured the paths are encodable, but not that they are unicode
files = [self._manifest_normalize(f) for f in self.filelist.files]
msg = "writing manifest file '%s'" % self.manifest
self.execute(write_file, (self.manifest, files), msg)
def warn(self, msg):
if not self._should_suppress_warning(msg):
sdist.warn(self, msg)
@staticmethod
def _should_suppress_warning(msg):
"""
suppress missing-file warnings from sdist
"""
return re.match(r"standard file .*not found", msg)
def add_defaults(self):
sdist.add_defaults(self)
self.check_license()
self.filelist.append(self.template)
self.filelist.append(self.manifest)
rcfiles = list(walk_revctrl())
if rcfiles:
self.filelist.extend(rcfiles)
elif os.path.exists(self.manifest):
self.read_manifest()
if os.path.exists("setup.py"):
# setup.py should be included by default, even if it's not
# the script called to create the sdist
self.filelist.append("setup.py")
ei_cmd = self.get_finalized_command('egg_info')
self.filelist.graft(ei_cmd.egg_info)
def prune_file_list(self):
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.prune(build.build_base)
self.filelist.prune(base_dir)
sep = re.escape(os.sep)
self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
is_regex=1)
def write_file(filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
contents = "\n".join(contents)
# assuming the contents have been vetted for utf-8 encoding
contents = contents.encode("utf-8")
with open(filename, "wb") as f: # always write POSIX-style manifest
f.write(contents)
def write_pkg_info(cmd, basename, filename):
log.info("writing %s", filename)
if not cmd.dry_run:
metadata = cmd.distribution.metadata
metadata.version, oldver = cmd.egg_version, metadata.version
metadata.name, oldname = cmd.egg_name, metadata.name
try:
# write unescaped data to PKG-INFO, so older pkg_resources
# can still parse it
metadata.write_pkg_info(cmd.egg_info)
finally:
metadata.name, metadata.version = oldname, oldver
safe = getattr(cmd.distribution, 'zip_safe', None)
bdist_egg.write_safety_flag(cmd.egg_info, safe)
def warn_depends_obsolete(cmd, basename, filename):
if os.path.exists(filename):
log.warn(
"WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
"Use the install_requires/extras_require setup() args instead."
)
def _write_requirements(stream, reqs):
lines = yield_lines(reqs or ())
def append_cr(line):
return line + '\n'
lines = map(append_cr, lines)
stream.writelines(lines)
def write_requirements(cmd, basename, filename):
dist = cmd.distribution
data = six.StringIO()
_write_requirements(data, dist.install_requires)
extras_require = dist.extras_require or {}
for extra in sorted(extras_require):
data.write('\n[{extra}]\n'.format(**vars()))
_write_requirements(data, extras_require[extra])
cmd.write_or_delete_file("requirements", filename, data.getvalue())
def write_setup_requirements(cmd, basename, filename):
data = io.StringIO()
_write_requirements(data, cmd.distribution.setup_requires)
cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
def write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[
k.split('.', 1)[0]
for k in cmd.distribution.iter_distribution_names()
]
)
cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
def overwrite_arg(cmd, basename, filename):
write_arg(cmd, basename, filename, True)
def write_arg(cmd, basename, filename, force=False):
argname = os.path.splitext(basename)[0]
value = getattr(cmd.distribution, argname, None)
if value is not None:
value = '\n'.join(value) + '\n'
cmd.write_or_delete_file(argname, filename, value, force)
def write_entries(cmd, basename, filename):
ep = cmd.distribution.entry_points
if isinstance(ep, six.string_types) or ep is None:
data = ep
else:
data = []
for section, contents in sorted(ep.items()):
if not isinstance(contents, six.string_types):
contents = EntryPoint.parse_group(section, contents)
contents = '\n'.join(sorted(map(str, contents.values())))
data.append('[%s]\n%s\n\n' % (section, contents))
data = ''.join(data)
cmd.write_or_delete_file('entry points', filename, data, True)
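# Illustrative entry_points.txt produced by the code above for
# entry_points={'console_scripts': ['mytool = mypkg.cli:main']}:
#
#     [console_scripts]
#     mytool = mypkg.cli:main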
def get_pkg_info_revision():
"""
Get a -r### off of PKG-INFO Version in case this is an sdist of
a subversion revision.
"""
warnings.warn(
"get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
if os.path.exists('PKG-INFO'):
with io.open('PKG-INFO') as f:
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
return 0
class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
"""Deprecated behavior warning for EggInfo, bypassing suppression."""
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/install.py
|
from distutils.errors import DistutilsArgError
import inspect
import glob
import warnings
import platform
import distutils.command.install as orig
import setuptools
# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
_install = orig.install
class install(orig.install):
"""Use easy_install to install the package, w/dependencies"""
user_options = orig.install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = orig.install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
orig.install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
def finalize_options(self):
orig.install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return orig.install.handle_extra_path(self)
# Ignore extra_path when installing an egg (or being run by another
# command without --root or --single-version-externally-managed)
self.path_file = None
self.extra_dirs = ''
def run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return orig.install.run(self)
if not self._called_from_setup(inspect.currentframe()):
# Run in backward-compatibility mode to support bdist_* commands.
orig.install.run(self)
else:
self.do_egg_install()
@staticmethod
def _called_from_setup(run_frame):
"""
Attempt to detect whether run() was called from setup() or by another
command. If called by setup(), the parent caller will be the
'run_command' method in 'distutils.dist', and *its* caller will be
the 'run_commands' method. If called any other way, the
immediate caller *might* be 'run_command', but it won't have been
called by 'run_commands'. Return True in that case or if a call stack
is unavailable. Return False otherwise.
"""
if run_frame is None:
msg = "Call stack not available. bdist_* commands may fail."
warnings.warn(msg)
if platform.python_implementation() == 'IronPython':
msg = "For best results, pass -X:Frames to enable call stack."
warnings.warn(msg)
return True
res = inspect.getouterframes(run_frame)[2]
caller, = res[:1]
info = inspect.getframeinfo(caller)
caller_module = caller.f_globals.get('__name__', '')
return (
caller_module == 'distutils.dist'
and info.function == 'run_commands'
)
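# Rough sketch of the frame walk above (assuming a call stack is available):
# inspect.getouterframes(run_frame)[2] is the frame two callers above run(),
# which is expected to be 'run_commands' in 'distutils.dist' when run() was
# reached via setup() -> run_commands() -> run_command() -> run().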
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run(show_deprecation=False)
setuptools.bootstrap_install_from = None
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
[cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
install.new_commands
)
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/develop.py
|
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsError, DistutilsOptionError
import os
import glob
import io
from setuptools.extern import six
import pkg_resources
from setuptools.command.easy_install import easy_install
from setuptools import namespaces
import setuptools
__metaclass__ = type
class develop(namespaces.DevelopInstaller, easy_install):
"""Set up package for development"""
description = "install package in 'development mode'"
user_options = easy_install.user_options + [
("uninstall", "u", "Uninstall this source package"),
("egg-path=", None, "Set the path to be used in the .egg-link file"),
]
boolean_options = easy_install.boolean_options + ['uninstall']
command_consumes_arguments = False # override base
def run(self):
if self.uninstall:
self.multi_version = True
self.uninstall_link()
self.uninstall_namespaces()
else:
self.install_for_development()
self.warn_deprecated_options()
def initialize_options(self):
self.uninstall = None
self.egg_path = None
easy_install.initialize_options(self)
self.setup_path = None
self.always_copy_from = '.' # always copy eggs installed in curdir
def finalize_options(self):
ei = self.get_finalized_command("egg_info")
if ei.broken_egg_info:
template = "Please rename %r to %r before using 'develop'"
args = ei.egg_info, ei.broken_egg_info
raise DistutilsError(template % args)
self.args = [ei.egg_name]
easy_install.finalize_options(self)
self.expand_basedirs()
self.expand_dirs()
# pick up setup-dir .egg files only: no .egg-info
self.package_index.scan(glob.glob('*.egg'))
egg_link_fn = ei.egg_name + '.egg-link'
self.egg_link = os.path.join(self.install_dir, egg_link_fn)
self.egg_base = ei.egg_base
if self.egg_path is None:
self.egg_path = os.path.abspath(ei.egg_base)
target = pkg_resources.normalize_path(self.egg_base)
egg_path = pkg_resources.normalize_path(
os.path.join(self.install_dir, self.egg_path))
if egg_path != target:
raise DistutilsOptionError(
"--egg-path must be a relative path from the install"
" directory to " + target
)
# Make a distribution for the package's source
self.dist = pkg_resources.Distribution(
target,
pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
project_name=ei.egg_name
)
self.setup_path = self._resolve_setup_path(
self.egg_base,
self.install_dir,
self.egg_path,
)
@staticmethod
def _resolve_setup_path(egg_base, install_dir, egg_path):
"""
Generate a path from egg_base back to '.' where the
setup script resides and ensure that path points to the
setup path from $install_dir/$egg_path.
"""
path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
if path_to_setup != os.curdir:
path_to_setup = '../' * (path_to_setup.count('/') + 1)
resolved = pkg_resources.normalize_path(
os.path.join(install_dir, egg_path, path_to_setup)
)
if resolved != pkg_resources.normalize_path(os.curdir):
raise DistutilsOptionError(
"Can't get a consistent path to setup script from"
" installation directory", resolved,
pkg_resources.normalize_path(os.curdir))
return path_to_setup
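# Worked example (hypothetical layout): for egg_base='src' the arithmetic
# above yields path_to_setup = '../' * (0 + 1) == '../', i.e. setup.py is
# expected one directory above $install_dir/$egg_path.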
def install_for_development(self):
if not six.PY2 and getattr(self.distribution, 'use_2to3', False):
# If we run 2to3, we cannot do this in place:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = pkg_resources.normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
# Fixup egg-link and easy-install.pth
ei_cmd = self.get_finalized_command("egg_info")
self.egg_path = build_path
self.dist.location = build_path
# XXX
self.dist._provider = pkg_resources.PathMetadata(
build_path, ei_cmd.egg_info)
else:
# Without 2to3, in-place works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
if setuptools.bootstrap_install_from:
self.easy_install(setuptools.bootstrap_install_from)
setuptools.bootstrap_install_from = None
self.install_namespaces()
# create an .egg-link in the installation dir, pointing to our egg
log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
if not self.dry_run:
with open(self.egg_link, "w") as f:
f.write(self.egg_path + "\n" + self.setup_path)
# postprocess the installed distro, fixing up .pth, installing scripts,
# and handling requirements
self.process_distribution(None, self.dist, not self.no_deps)
def uninstall_link(self):
if os.path.exists(self.egg_link):
log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
egg_link_file = open(self.egg_link)
contents = [line.rstrip() for line in egg_link_file]
egg_link_file.close()
if contents not in ([self.egg_path],
[self.egg_path, self.setup_path]):
log.warn("Link points to %s: uninstall aborted", contents)
return
if not self.dry_run:
os.unlink(self.egg_link)
if not self.dry_run:
self.update_pth(self.dist) # remove any .pth link to us
if self.distribution.scripts:
# XXX should also check for entry point scripts!
log.warn("Note: you must uninstall or replace scripts manually!")
def install_egg_scripts(self, dist):
if dist is not self.dist:
# Installing a dependency, so fall back to normal behavior
return easy_install.install_egg_scripts(self, dist)
# create wrapper scripts in the script dir, pointing to dist.scripts
# new-style...
self.install_wrapper_scripts(dist)
# ...and old-style
for script_name in self.distribution.scripts or []:
script_path = os.path.abspath(convert_path(script_name))
script_name = os.path.basename(script_path)
with io.open(script_path) as strm:
script_text = strm.read()
self.install_script(dist, script_name, script_text, script_path)
def install_wrapper_scripts(self, dist):
dist = VersionlessRequirement(dist)
return easy_install.install_wrapper_scripts(self, dist)
class VersionlessRequirement:
"""
Adapt a pkg_resources.Distribution to simply return the project
name as the 'requirement' so that scripts will work across
multiple versions.
>>> from pkg_resources import Distribution
>>> dist = Distribution(project_name='foo', version='1.0')
>>> str(dist.as_requirement())
'foo==1.0'
>>> adapted_dist = VersionlessRequirement(dist)
>>> str(adapted_dist.as_requirement())
'foo'
"""
def __init__(self, dist):
self.__dist = dist
def __getattr__(self, name):
return getattr(self.__dist, name)
def as_requirement(self):
return self.project_name
| 0 |
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools
|
qxf2_public_repos/question-answer-api/venv/Lib/site-packages/setuptools/command/rotate.py
|
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import shutil
from setuptools.extern import six
from setuptools import Command
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError as e:
raise DistutilsOptionError("--keep must be an integer") from e
if isinstance(self.match, six.string_types):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name() + '*' + pattern
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
if os.path.isdir(f):
shutil.rmtree(f)
else:
os.unlink(f)
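# Typical invocation (illustrative):
#
#     python setup.py rotate --match=.egg --keep=2
#
# which keeps the two newest '<name>*.egg' files in dist/ and deletes the rest.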
| 0 |