Dataset schema (one row per function; for string columns, min and max are value lengths, for numeric columns they are value ranges):

| column | dtype | min | max |
|---|---|---|---|
| nwo | string | 10 | 28 |
| sha | string | 40 | 40 |
| path | string | 11 | 97 |
| identifier | string | 1 | 64 |
| parameters | string | 2 | 2.24k |
| return_statement | string | 0 | 2.17k |
| docstring | string | 0 | 5.45k |
| docstring_summary | string | 0 | 3.83k |
| func_begin | int64 | 1 | 13.4k |
| func_end | int64 | 2 | 13.4k |
| function | string | 28 | 56.4k |
| url | string | 106 | 209 |
| project | int64 | 1 | 48 |
| executed_lines | list | | |
| executed_lines_pc | float64 | 0 | 153 |
| missing_lines | list | | |
| missing_lines_pc | float64 | 0 | 100 |
| covered | bool (2 classes) | | |
| filecoverage | float64 | 2.53 | 100 |
| function_lines | int64 | 2 | 1.46k |
| mccabe | int64 | 1 | 253 |
| coverage | float64 | 0 | 100 |
| docstring_lines | int64 | 0 | 112 |
| function_nodoc | string | 9 | 56.4k |
| id | int64 | 0 | 29.8k |
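Each row below is reflowed into labeled `field: value` form. As a minimal sketch of how such rows might be consumed programmatically, assuming a JSON-lines export with exactly the column names above (the file name `records.jsonl` is hypothetical):

```python
import json

def iter_records(path="records.jsonl"):
    """Yield one dict per table row (assumes a hypothetical JSONL export)."""
    with open(path, encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

# Example: print functions whose `covered` flag is false.
for rec in iter_records():
    if not rec["covered"]:
        print(f'{rec["nwo"]}:{rec["path"]} {rec["identifier"]} '
              f'coverage={rec["coverage"]:.1f} mccabe={rec["mccabe"]}')
```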
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/runtime.py
identifier: ChainableUndefined.__html__
parameters: (self)
return_statement: return str(self)
func_begin: 979 | func_end: 980
function:
def __html__(self) -> str:
return str(self)
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/runtime.py#L979-L980
project: 42
executed_lines: [0, 1] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 89.049919
function_lines: 2 | mccabe: 1 | coverage: 100 | docstring_lines: 0
function_nodoc:
def __html__(self) -> str:
return str(self)
id: 28061
---
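For orientation, the percentage fields in each record are consistent (in the rows shown here) with `executed_lines` and `missing_lines` holding 0-based offsets into the function body, both `_pc` fields being percentages of `function_lines`, and `coverage` treating every line that is not missing as covered. A sketch of that relationship, an observation from the sample rows rather than a documented guarantee:

```python
def recompute_percentages(rec):
    # Assumes the relationships observed in the sample rows hold in general.
    total = rec["function_lines"]
    executed_pc = 100 * len(rec["executed_lines"]) / total
    missing_pc = 100 * len(rec["missing_lines"]) / total
    # Blank and continuation lines are neither executed nor missing, so
    # coverage counts everything that is not missing:
    coverage = 100 - missing_pc
    return executed_pc, missing_pc, coverage
```

For id 28067 below, 16 of 21 lines executed and 4 missing give 76.190476, 19.047619, and 80.952381, matching the stored values.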
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/runtime.py
identifier: ChainableUndefined.__getattr__
parameters: (self, _: str)
return_statement: return self
func_begin: 982 | func_end: 983
function:
def __getattr__(self, _: str) -> "ChainableUndefined":
return self
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/runtime.py#L982-L983
project: 42
executed_lines: [0, 1] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 89.049919
function_lines: 2 | mccabe: 1 | coverage: 100 | docstring_lines: 0
function_nodoc:
def __getattr__(self, _: str) -> "ChainableUndefined":
return self
id: 28062
---
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/runtime.py
identifier: DebugUndefined.__str__
parameters: (self)
return_statement: return f"{{{{ {message} }}}}"
func_begin: 1004 | func_end: 1017
function:
def __str__(self) -> str:
if self._undefined_hint:
message = f"undefined value printed: {self._undefined_hint}"
elif self._undefined_obj is missing:
message = self._undefined_name # type: ignore
else:
message = (
f"no such element: {object_type_repr(self._undefined_obj)}"
f"[{self._undefined_name!r}]"
)
return f"{{{{ {message} }}}}"
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/runtime.py#L1004-L1017
project: 42
executed_lines: [0, 1, 2, 3, 4, 5, 6, 8, 12, 13] | executed_lines_pc: 71.428571
missing_lines: [] | missing_lines_pc: 0
covered: false | filecoverage: 89.049919
function_lines: 14 | mccabe: 3 | coverage: 100 | docstring_lines: 0
function_nodoc:
def __str__(self) -> str:
if self._undefined_hint:
message = f"undefined value printed: {self._undefined_hint}"
elif self._undefined_obj is missing:
message = self._undefined_name # type: ignore
else:
message = (
f"no such element: {object_type_repr(self._undefined_obj)}"
f"[{self._undefined_name!r}]"
)
return f"{{{{ {message} }}}}"
id: 28063
---
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/visitor.py
identifier: NodeVisitor.get_visitor
parameters: (self, node: Node)
return_statement: return getattr(self, f"visit_{type(node).__name__}", None)
docstring:
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
docstring_summary:
Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
func_begin: 28 | func_end: 33
function:
def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
return getattr(self, f"visit_{type(node).__name__}", None)
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/visitor.py#L28-L33
project: 42
executed_lines: [0, 1, 2, 3, 4, 5] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 73.529412
function_lines: 6 | mccabe: 1 | coverage: 100 | docstring_lines: 3
function_nodoc:
def get_visitor(self, node: Node) -> "t.Optional[VisitCallable]":
return getattr(self, f"visit_{type(node).__name__}", None)
id: 28064
---
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/visitor.py
identifier: NodeVisitor.visit
parameters: (self, node: Node, *args: t.Any, **kwargs: t.Any)
return_statement: return self.generic_visit(node, *args, **kwargs)
docstring: Visit a node.
docstring_summary: Visit a node.
func_begin: 35 | func_end: 42
function:
def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/visitor.py#L35-L42
project: 42
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 73.529412
function_lines: 8 | mccabe: 2 | coverage: 100 | docstring_lines: 1
function_nodoc:
def visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
id: 28065
---
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/visitor.py
identifier: NodeVisitor.generic_visit
parameters: (self, node: Node, *args: t.Any, **kwargs: t.Any)
docstring: Called if no explicit visitor function exists for a node.
docstring_summary: Called if no explicit visitor function exists for a node.
func_begin: 44 | func_end: 47
function:
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
"""Called if no explicit visitor function exists for a node."""
for child_node in node.iter_child_nodes():
self.visit(child_node, *args, **kwargs)
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/visitor.py#L44-L47
project: 42
executed_lines: [0, 1, 2, 3] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 73.529412
function_lines: 4 | mccabe: 2 | coverage: 100 | docstring_lines: 1
function_nodoc:
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.Any:
for child_node in node.iter_child_nodes():
self.visit(child_node, *args, **kwargs)
id: 28066
---
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/visitor.py
identifier: NodeTransformer.generic_visit
parameters: (self, node: Node, *args: t.Any, **kwargs: t.Any)
return_statement: return node
func_begin: 61 | func_end: 81
function:
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/visitor.py#L61-L81
project: 42
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 9, 12, 13, 14, 15, 16, 19, 20] | executed_lines_pc: 76.190476
missing_lines: [8, 10, 11, 17] | missing_lines_pc: 19.047619
covered: false | filecoverage: 73.529412
function_lines: 21 | mccabe: 9 | coverage: 80.952381 | docstring_lines: 0
function_nodoc:
def generic_visit(self, node: Node, *args: t.Any, **kwargs: t.Any) -> Node:
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
id: 28067
---
nwo: pallets/jinja
sha: 89eec1c5ee5022342f05c121dccb0e7b8b0ac523
path: src/jinja2/visitor.py
identifier: NodeTransformer.visit_list
parameters: (self, node: Node, *args: t.Any, **kwargs: t.Any)
return_statement: return rv
docstring: As transformers may return lists in some places this method can be used to enforce a list as return value.
docstring_summary: As transformers may return lists in some places this method can be used to enforce a list as return value.
func_begin: 83 | func_end: 92
function:
def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
return [rv]
return rv
url:
https://github.com/pallets/jinja/blob/89eec1c5ee5022342f05c121dccb0e7b8b0ac523/project42/src/jinja2/visitor.py#L83-L92
project: 42
executed_lines: [0, 1, 2, 3] | executed_lines_pc: 40
missing_lines: [4, 6, 7, 9] | missing_lines_pc: 40
covered: false | filecoverage: 73.529412
function_lines: 10 | mccabe: 2 | coverage: 60 | docstring_lines: 2
function_nodoc:
def visit_list(self, node: Node, *args: t.Any, **kwargs: t.Any) -> t.List[Node]:
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
return [rv]
return rv
id: 28068
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/chardata.py
identifier: _build_regexes
parameters: ()
return_statement: return encoding_regexes
docstring:
ENCODING_REGEXES contain reasonably fast ways to detect if we
could represent a given string in a given encoding. The simplest one is
the 'ascii' detector, which of course just determines if all characters
are between U+0000 and U+007F.
docstring_summary:
ENCODING_REGEXES contain reasonably fast ways to detect if we
could represent a given string in a given encoding. The simplest one is
the 'ascii' detector, which of course just determines if all characters
are between U+0000 and U+007F.
func_begin: 30 | func_end: 54
function:
def _build_regexes():
"""
ENCODING_REGEXES contain reasonably fast ways to detect if we
could represent a given string in a given encoding. The simplest one is
the 'ascii' detector, which of course just determines if all characters
are between U+0000 and U+007F.
"""
# Define a regex that matches ASCII text.
encoding_regexes = {"ascii": re.compile("^[\x00-\x7f]*$")}
for encoding in CHARMAP_ENCODINGS:
# Make a sequence of characters that bytes \x80 to \xFF decode to
# in each encoding, as well as byte \x1A, which is used to represent
# the replacement character � in the sloppy-* encodings.
byte_range = bytes(list(range(0x80, 0x100)) + [0x1A])
charlist = byte_range.decode(encoding)
# The rest of the ASCII bytes -- bytes \x00 to \x19 and \x1B
# to \x7F -- will decode as those ASCII characters in any encoding we
# support, so we can just include them as ranges. This also lets us
# not worry about escaping regex special characters, because all of
# them are in the \x1B to \x7F range.
regex = "^[\x00-\x19\x1b-\x7f{0}]*$".format(charlist)
encoding_regexes[encoding] = re.compile(regex)
return encoding_regexes
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/chardata.py#L30-L54
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 100
function_lines: 25 | mccabe: 2 | coverage: 100 | docstring_lines: 4
function_nodoc:
def _build_regexes():
# Define a regex that matches ASCII text.
encoding_regexes = {"ascii": re.compile("^[\x00-\x7f]*$")}
for encoding in CHARMAP_ENCODINGS:
# Make a sequence of characters that bytes \x80 to \xFF decode to
# in each encoding, as well as byte \x1A, which is used to represent
# the replacement character � in the sloppy-* encodings.
byte_range = bytes(list(range(0x80, 0x100)) + [0x1A])
charlist = byte_range.decode(encoding)
# The rest of the ASCII bytes -- bytes \x00 to \x19 and \x1B
# to \x7F -- will decode as those ASCII characters in any encoding we
# support, so we can just include them as ranges. This also lets us
# not worry about escaping regex special characters, because all of
# them are in the \x1B to \x7F range.
regex = "^[\x00-\x19\x1b-\x7f{0}]*$".format(charlist)
encoding_regexes[encoding] = re.compile(regex)
return encoding_regexes
id: 28069
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/chardata.py
identifier: _build_html_entities
parameters: ()
return_statement: return entities
func_begin: 60 | func_end: 77
function:
def _build_html_entities():
entities = {}
# Create a dictionary based on the built-in HTML5 entity dictionary.
# Add a limited set of HTML entities that we'll also decode if they've
    # been case-folded to uppercase, such as decoding &NTILDE; as "Ñ".
for name, char in html.entities.html5.items():
if name.endswith(";"):
entities["&" + name] = char
# Restrict the set of characters we can attempt to decode if their
# name has been uppercased. If we tried to handle all entity names,
# the results would be ambiguous.
if name == name.lower():
name_upper = name.upper()
entity_upper = "&" + name_upper
if html.unescape(entity_upper) == entity_upper:
entities[entity_upper] = char.upper()
return entities
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/chardata.py#L60-L77
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 100
function_lines: 18 | mccabe: 5 | coverage: 100 | docstring_lines: 0
function_nodoc:
def _build_html_entities():
entities = {}
# Create a dictionary based on the built-in HTML5 entity dictionary.
# Add a limited set of HTML entities that we'll also decode if they've
    # been case-folded to uppercase, such as decoding &NTILDE; as "Ñ".
for name, char in html.entities.html5.items():
if name.endswith(";"):
entities["&" + name] = char
# Restrict the set of characters we can attempt to decode if their
# name has been uppercased. If we tried to handle all entity names,
# the results would be ambiguous.
if name == name.lower():
name_upper = name.upper()
entity_upper = "&" + name_upper
if html.unescape(entity_upper) == entity_upper:
entities[entity_upper] = char.upper()
return entities
id: 28070
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/chardata.py
identifier: possible_encoding
parameters: (text, encoding)
return_statement: return bool(ENCODING_REGEXES[encoding].match(text))
docstring:
Given text and a single-byte encoding, check whether that text could have
been decoded from that single-byte encoding.
In other words, check whether it can be encoded in that encoding, possibly
sloppily.
docstring_summary:
Given text and a single-byte encoding, check whether that text could have
been decoded from that single-byte encoding.
func_begin: 84 | func_end: 92
function:
def possible_encoding(text, encoding):
"""
Given text and a single-byte encoding, check whether that text could have
been decoded from that single-byte encoding.
In other words, check whether it can be encoded in that encoding, possibly
sloppily.
"""
return bool(ENCODING_REGEXES[encoding].match(text))
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/chardata.py#L84-L92
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 100
function_lines: 9 | mccabe: 1 | coverage: 100 | docstring_lines: 5
function_nodoc:
def possible_encoding(text, encoding):
return bool(ENCODING_REGEXES[encoding].match(text))
id: 28071
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/chardata.py
identifier: _build_control_char_mapping
parameters: ()
return_statement: return control_chars
docstring:
Build a translate mapping that strips likely-unintended control characters.
See :func:`ftfy.fixes.remove_control_chars` for a description of these
codepoint ranges and why they should be removed.
docstring_summary:
Build a translate mapping that strips likely-unintended control characters.
See :func:`ftfy.fixes.remove_control_chars` for a description of these
codepoint ranges and why they should be removed.
func_begin: 95 | func_end: 114
function:
def _build_control_char_mapping():
"""
Build a translate mapping that strips likely-unintended control characters.
See :func:`ftfy.fixes.remove_control_chars` for a description of these
codepoint ranges and why they should be removed.
"""
control_chars = {}
for i in itertools.chain(
range(0x00, 0x09),
[0x0B],
range(0x0E, 0x20),
[0x7F],
range(0x206A, 0x2070),
[0xFEFF],
range(0xFFF9, 0xFFFD),
):
control_chars[i] = None
return control_chars
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/chardata.py#L95-L114
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 100
function_lines: 20 | mccabe: 2 | coverage: 100 | docstring_lines: 3
function_nodoc:
def _build_control_char_mapping():
control_chars = {}
for i in itertools.chain(
range(0x00, 0x09),
[0x0B],
range(0x0E, 0x20),
[0x7F],
range(0x206A, 0x2070),
[0xFEFF],
range(0xFFF9, 0xFFFD),
):
control_chars[i] = None
return control_chars
id: 28072
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/chardata.py
identifier: _build_width_map
parameters: ()
return_statement: return width_map
docstring: Build a translate mapping that replaces halfwidth and fullwidth forms with their standard-width forms.
docstring_summary: Build a translate mapping that replaces halfwidth and fullwidth forms with their standard-width forms.
func_begin: 231 | func_end: 245
function:
def _build_width_map():
"""
Build a translate mapping that replaces halfwidth and fullwidth forms
with their standard-width forms.
"""
# Though it's not listed as a fullwidth character, we'll want to convert
# U+3000 IDEOGRAPHIC SPACE to U+20 SPACE on the same principle, so start
# with that in the dictionary.
width_map = {0x3000: " "}
for i in range(0xFF01, 0xFFF0):
char = chr(i)
alternate = unicodedata.normalize("NFKC", char)
if alternate != char:
width_map[i] = alternate
return width_map
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/chardata.py#L231-L245
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 100
function_lines: 15 | mccabe: 3 | coverage: 100 | docstring_lines: 2
function_nodoc:
def _build_width_map():
# Though it's not listed as a fullwidth character, we'll want to convert
# U+3000 IDEOGRAPHIC SPACE to U+20 SPACE on the same principle, so start
# with that in the dictionary.
width_map = {0x3000: " "}
for i in range(0xFF01, 0xFFF0):
char = chr(i)
alternate = unicodedata.normalize("NFKC", char)
if alternate != char:
width_map[i] = alternate
return width_map
id: 28073
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: fix_encoding_and_explain
parameters: (text)
return_statement: return ftfy.fix_encoding_and_explain(text)
docstring: Deprecated copy of `ftfy.fix_encoding_and_explain()`.
docstring_summary: Deprecated copy of `ftfy.fix_encoding_and_explain()`.
func_begin: 36 | func_end: 44
function:
def fix_encoding_and_explain(text):
"""
Deprecated copy of `ftfy.fix_encoding_and_explain()`.
"""
warnings.warn(
"`fix_encoding_and_explain()` has moved to the main module of ftfy.",
DeprecationWarning,
)
return ftfy.fix_encoding_and_explain(text)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L36-L44
project: 44
executed_lines: [0, 1, 2, 3] | executed_lines_pc: 44.444444
missing_lines: [4, 8] | missing_lines_pc: 22.222222
covered: false | filecoverage: 85.897436
function_lines: 9 | mccabe: 1 | coverage: 77.777778 | docstring_lines: 1
function_nodoc:
def fix_encoding_and_explain(text):
warnings.warn(
"`fix_encoding_and_explain()` has moved to the main module of ftfy.",
DeprecationWarning,
)
return ftfy.fix_encoding_and_explain(text)
id: 28074
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: fix_encoding
parameters: (text)
return_statement: return ftfy.fix_encoding(text)
docstring: Deprecated copy of `ftfy.fix_encoding()`.
docstring_summary: Deprecated copy of `ftfy.fix_encoding()`.
func_begin: 47 | func_end: 54
function:
def fix_encoding(text):
"""
Deprecated copy of `ftfy.fix_encoding()`.
"""
warnings.warn(
"`fix_encoding()` has moved to the main module of ftfy.", DeprecationWarning
)
return ftfy.fix_encoding(text)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L47-L54
project: 44
executed_lines: [0, 1, 2, 3] | executed_lines_pc: 50
missing_lines: [4, 7] | missing_lines_pc: 25
covered: false | filecoverage: 85.897436
function_lines: 8 | mccabe: 1 | coverage: 75 | docstring_lines: 1
function_nodoc:
def fix_encoding(text):
warnings.warn(
"`fix_encoding()` has moved to the main module of ftfy.", DeprecationWarning
)
return ftfy.fix_encoding(text)
id: 28075
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: apply_plan
parameters: (text, plan)
return_statement: return ftfy.apply_plan(text, plan)
docstring: Deprecated copy of `ftfy.apply_plan()`.
docstring_summary: Deprecated copy of `ftfy.apply_plan()`.
func_begin: 57 | func_end: 64
function:
def apply_plan(text, plan):
"""
Deprecated copy of `ftfy.apply_plan()`.
"""
warnings.warn(
"`apply_plan()` has moved to the main module of ftfy.", DeprecationWarning
)
return ftfy.apply_plan(text, plan)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L57-L64
project: 44
executed_lines: [0, 1, 2, 3] | executed_lines_pc: 50
missing_lines: [4, 7] | missing_lines_pc: 25
covered: false | filecoverage: 85.897436
function_lines: 8 | mccabe: 1 | coverage: 75 | docstring_lines: 1
function_nodoc:
def apply_plan(text, plan):
warnings.warn(
"`apply_plan()` has moved to the main module of ftfy.", DeprecationWarning
)
return ftfy.apply_plan(text, plan)
id: 28076
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: _unescape_fixup
parameters: (match)
docstring: Replace one matched HTML entity with the character it represents, if possible.
docstring_summary: Replace one matched HTML entity with the character it represents, if possible.
func_begin: 67 | func_end: 85
function:
def _unescape_fixup(match):
"""
Replace one matched HTML entity with the character it represents,
if possible.
"""
text = match.group(0)
if text in HTML_ENTITIES:
return HTML_ENTITIES[text]
elif text.startswith("&#"):
unescaped = html.unescape(text)
# If html.unescape only decoded part of the string, that's not what
# we want. The semicolon should be consumed.
if ";" in unescaped:
return text
else:
return unescaped
else:
return text
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L67-L85
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] | executed_lines_pc: 94.736842
missing_lines: [18] | missing_lines_pc: 5.263158
covered: false | filecoverage: 85.897436
function_lines: 19 | mccabe: 4 | coverage: 94.736842 | docstring_lines: 2
function_nodoc:
def _unescape_fixup(match):
text = match.group(0)
if text in HTML_ENTITIES:
return HTML_ENTITIES[text]
elif text.startswith("&#"):
unescaped = html.unescape(text)
# If html.unescape only decoded part of the string, that's not what
# we want. The semicolon should be consumed.
if ";" in unescaped:
return text
else:
return unescaped
else:
return text
id: 28077
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: unescape_html
parameters: (text)
return_statement: return HTML_ENTITY_RE.sub(_unescape_fixup, text)
docstring:
Decode HTML entities and character references, including some nonstandard
ones written in all-caps.
Python has a built-in called `html.unescape` that can decode HTML escapes,
including a bunch of messy edge cases such as decoding escapes without
semicolons such as "&amp".
If you know you've got HTML-escaped text, applying `html.unescape` is the
right way to convert it to plain text. But in ambiguous situations, that
would create false positives. For example, the informally written text
"this¬ that" should not automatically be decoded as "this¬ that".
In this function, we decode the escape sequences that appear in the
`html.entities.html5` dictionary, as long as they are the unambiguous ones
that end in semicolons.
We also decode all-caps versions of Latin letters and common symbols.
If a database contains the name 'P&EACUTE;REZ', we can read that and intuit
that it was supposed to say 'PÉREZ'. This is limited to a smaller set of
entities, because there are many instances where entity names are
case-sensitive in complicated ways.
>>> unescape_html('&lt;tag&gt;')
'<tag>'
>>> unescape_html('&Jscr;ohn &HilbertSpace;ancock')
'𝒥ohn ℋancock'
>>> unescape_html('&checkmark;')
'✓'
>>> unescape_html('P&eacute;rez')
'Pérez'
>>> unescape_html('P&EACUTE;REZ')
'PÉREZ'
>>> unescape_html('BUNDESSTRA&SZLIG;E')
'BUNDESSTRASSE'
>>> unescape_html('&ntilde; &Ntilde; &NTILDE; &nTILDE;')
'ñ Ñ Ñ &nTILDE;'
docstring_summary:
Decode HTML entities and character references, including some nonstandard
ones written in all-caps.
func_begin: 88 | func_end: 133
function:
def unescape_html(text):
"""
Decode HTML entities and character references, including some nonstandard
ones written in all-caps.
Python has a built-in called `html.unescape` that can decode HTML escapes,
including a bunch of messy edge cases such as decoding escapes without
semicolons such as "&amp".
If you know you've got HTML-escaped text, applying `html.unescape` is the
right way to convert it to plain text. But in ambiguous situations, that
would create false positives. For example, the informally written text
"this¬ that" should not automatically be decoded as "this¬ that".
In this function, we decode the escape sequences that appear in the
`html.entities.html5` dictionary, as long as they are the unambiguous ones
that end in semicolons.
We also decode all-caps versions of Latin letters and common symbols.
If a database contains the name 'P&EACUTE;REZ', we can read that and intuit
that it was supposed to say 'PÉREZ'. This is limited to a smaller set of
entities, because there are many instances where entity names are
case-sensitive in complicated ways.
>>> unescape_html('&lt;tag&gt;')
'<tag>'
>>> unescape_html('&Jscr;ohn &HilbertSpace;ancock')
'𝒥ohn ℋancock'
>>> unescape_html('&checkmark;')
'✓'
>>> unescape_html('P&eacute;rez')
'Pérez'
>>> unescape_html('P&EACUTE;REZ')
'PÉREZ'
>>> unescape_html('BUNDESSTRA&SZLIG;E')
'BUNDESSTRASSE'
>>> unescape_html('&ntilde; &Ntilde; &NTILDE; &nTILDE;')
'ñ Ñ Ñ &nTILDE;'
"""
return HTML_ENTITY_RE.sub(_unescape_fixup, text)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L88-L133
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 46 | mccabe: 1 | coverage: 100 | docstring_lines: 42
function_nodoc:
def unescape_html(text):
return HTML_ENTITY_RE.sub(_unescape_fixup, text)
id: 28078
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: remove_terminal_escapes
parameters: (text)
return_statement: return ANSI_RE.sub("", text)
docstring:
r"""
Strip out "ANSI" terminal escape sequences, such as those that produce
colored text on Unix.
>>> print(remove_terminal_escapes(
... "\033[36;44mI'm blue, da ba dee da ba doo...\033[0m"
... ))
I'm blue, da ba dee da ba doo...
docstring_summary:
r"""
Strip out "ANSI" terminal escape sequences, such as those that produce
colored text on Unix.
func_begin: 139 | func_end: 149
function:
def remove_terminal_escapes(text):
r"""
Strip out "ANSI" terminal escape sequences, such as those that produce
colored text on Unix.
>>> print(remove_terminal_escapes(
... "\033[36;44mI'm blue, da ba dee da ba doo...\033[0m"
... ))
I'm blue, da ba dee da ba doo...
"""
return ANSI_RE.sub("", text)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L139-L149
project: 44
executed_lines: [0, 9, 10] | executed_lines_pc: 27.272727
missing_lines: [] | missing_lines_pc: 0
covered: false | filecoverage: 85.897436
function_lines: 11 | mccabe: 1 | coverage: 100 | docstring_lines: 8
function_nodoc:
def remove_terminal_escapes(text):
r"""
Strip out "ANSI" terminal escape sequences, such as those that produce
colored text on Unix.
>>> print(remove_terminal_escapes(
... "\033[36;44mI'm blue, da ba dee da ba doo...\033[0m"
... ))
I'm blue, da ba dee da ba doo...
"""
return ANSI_RE.sub("", text)
id: 28079
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: uncurl_quotes
parameters: (text)
return_statement: return SINGLE_QUOTE_RE.sub("'", DOUBLE_QUOTE_RE.sub('"', text))
docstring:
r"""
Replace curly quotation marks with straight equivalents.
>>> print(uncurl_quotes('\u201chere\u2019s a test\u201d'))
"here's a test"
docstring_summary:
r"""
Replace curly quotation marks with straight equivalents.
func_begin: 152 | func_end: 159
function:
def uncurl_quotes(text):
r"""
Replace curly quotation marks with straight equivalents.
>>> print(uncurl_quotes('\u201chere\u2019s a test\u201d'))
"here's a test"
"""
return SINGLE_QUOTE_RE.sub("'", DOUBLE_QUOTE_RE.sub('"', text))
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L152-L159
project: 44
executed_lines: [0, 6, 7] | executed_lines_pc: 37.5
missing_lines: [] | missing_lines_pc: 0
covered: false | filecoverage: 85.897436
function_lines: 8 | mccabe: 1 | coverage: 100 | docstring_lines: 5
function_nodoc:
def uncurl_quotes(text):
r"""
Replace curly quotation marks with straight equivalents.
>>> print(uncurl_quotes('\u201chere\u2019s a test\u201d'))
"here's a test"
"""
return SINGLE_QUOTE_RE.sub("'", DOUBLE_QUOTE_RE.sub('"', text))
id: 28080
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: fix_latin_ligatures
parameters: (text)
return_statement: return text.translate(LIGATURES)
docstring:
Replace single-character ligatures of Latin letters, such as 'ﬁ', with the
characters that they contain, as in 'fi'. Latin ligatures are usually not
intended in text strings (though they're lovely in *rendered* text). If
you have such a ligature in your string, it is probably a result of a
copy-and-paste glitch.
We leave ligatures in other scripts alone to be safe. They may be intended,
and removing them may lose information. If you want to take apart nearly
all ligatures, use NFKC normalization.
>>> print(fix_latin_ligatures("ﬂuﬃest"))
fluffiest
docstring_summary:
Replace single-character ligatures of Latin letters, such as 'ﬁ', with the
characters that they contain, as in 'fi'. Latin ligatures are usually not
intended in text strings (though they're lovely in *rendered* text). If
you have such a ligature in your string, it is probably a result of a
copy-and-paste glitch.
func_begin: 162 | func_end: 177
function:
def fix_latin_ligatures(text):
"""
Replace single-character ligatures of Latin letters, such as 'ﬁ', with the
characters that they contain, as in 'fi'. Latin ligatures are usually not
intended in text strings (though they're lovely in *rendered* text). If
you have such a ligature in your string, it is probably a result of a
copy-and-paste glitch.
We leave ligatures in other scripts alone to be safe. They may be intended,
and removing them may lose information. If you want to take apart nearly
all ligatures, use NFKC normalization.
>>> print(fix_latin_ligatures("ﬂuﬃest"))
fluffiest
"""
return text.translate(LIGATURES)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L162-L177
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 16 | mccabe: 1 | coverage: 100 | docstring_lines: 12
function_nodoc:
def fix_latin_ligatures(text):
return text.translate(LIGATURES)
id: 28081
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: fix_character_width
parameters: (text)
return_statement: return text.translate(WIDTH_MAP)
docstring:
The ASCII characters, katakana, and Hangul characters have alternate
"halfwidth" or "fullwidth" forms that help text line up in a grid.
If you don't need these width properties, you probably want to replace
these characters with their standard form, which is what this function
does.
Note that this replaces the ideographic space, U+3000, with the ASCII
space, U+20.
>>> print(fix_character_width("LOUD NOISES"))
LOUD NOISES
>>> print(fix_character_width("Uターン")) # this means "U-turn"
Uターン
docstring_summary:
The ASCII characters, katakana, and Hangul characters have alternate
"halfwidth" or "fullwidth" forms that help text line up in a grid.
func_begin: 180 | func_end: 197
function:
def fix_character_width(text):
"""
The ASCII characters, katakana, and Hangul characters have alternate
"halfwidth" or "fullwidth" forms that help text line up in a grid.
If you don't need these width properties, you probably want to replace
these characters with their standard form, which is what this function
does.
Note that this replaces the ideographic space, U+3000, with the ASCII
space, U+20.
>>> print(fix_character_width("LOUD NOISES"))
LOUD NOISES
>>> print(fix_character_width("Uターン")) # this means "U-turn"
Uターン
"""
return text.translate(WIDTH_MAP)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L180-L197
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 18 | mccabe: 1 | coverage: 100 | docstring_lines: 14
function_nodoc:
def fix_character_width(text):
return text.translate(WIDTH_MAP)
id: 28082
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: fix_line_breaks
parameters: (text)
return_statement:
return (
text.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\u2028", "\n")
.replace("\u2029", "\n")
.replace("\u0085", "\n")
)
docstring:
r"""
Convert all line breaks to Unix style.
This will convert the following sequences into the standard \\n
line break:
- CRLF (\\r\\n), used on Windows and in some communication protocols
- CR (\\r), once used on Mac OS Classic, and now kept alive by misguided
software such as Microsoft Office for Mac
- LINE SEPARATOR (\\u2028) and PARAGRAPH SEPARATOR (\\u2029), defined by
Unicode and used to sow confusion and discord
- NEXT LINE (\\x85), a C1 control character that is certainly not what you
meant
The NEXT LINE character is a bit of an odd case, because it
usually won't show up if `fix_encoding` is also being run.
\\x85 is very common mojibake for \\u2026, HORIZONTAL ELLIPSIS.
>>> print(fix_line_breaks(
... "This string is made of two things:\u2029"
... "1. Unicode\u2028"
... "2. Spite"
... ))
This string is made of two things:
1. Unicode
2. Spite
For further testing and examples, let's define a function to make sure
we can see the control characters in their escaped form:
>>> def eprint(text):
... print(text.encode('unicode-escape').decode('ascii'))
>>> eprint(fix_line_breaks("Content-type: text/plain\r\n\r\nHi."))
Content-type: text/plain\n\nHi.
>>> eprint(fix_line_breaks("This is how Microsoft \r trolls Mac users"))
This is how Microsoft \n trolls Mac users
>>> eprint(fix_line_breaks("What is this \x85 I don't even"))
What is this \n I don't even
docstring_summary:
r"""
Convert all line breaks to Unix style.
func_begin: 200 | func_end: 249
function:
def fix_line_breaks(text):
r"""
Convert all line breaks to Unix style.
This will convert the following sequences into the standard \\n
line break:
- CRLF (\\r\\n), used on Windows and in some communication protocols
- CR (\\r), once used on Mac OS Classic, and now kept alive by misguided
software such as Microsoft Office for Mac
- LINE SEPARATOR (\\u2028) and PARAGRAPH SEPARATOR (\\u2029), defined by
Unicode and used to sow confusion and discord
- NEXT LINE (\\x85), a C1 control character that is certainly not what you
meant
The NEXT LINE character is a bit of an odd case, because it
usually won't show up if `fix_encoding` is also being run.
\\x85 is very common mojibake for \\u2026, HORIZONTAL ELLIPSIS.
>>> print(fix_line_breaks(
... "This string is made of two things:\u2029"
... "1. Unicode\u2028"
... "2. Spite"
... ))
This string is made of two things:
1. Unicode
2. Spite
For further testing and examples, let's define a function to make sure
we can see the control characters in their escaped form:
>>> def eprint(text):
... print(text.encode('unicode-escape').decode('ascii'))
>>> eprint(fix_line_breaks("Content-type: text/plain\r\n\r\nHi."))
Content-type: text/plain\n\nHi.
>>> eprint(fix_line_breaks("This is how Microsoft \r trolls Mac users"))
This is how Microsoft \n trolls Mac users
>>> eprint(fix_line_breaks("What is this \x85 I don't even"))
What is this \n I don't even
"""
return (
text.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\u2028", "\n")
.replace("\u2029", "\n")
.replace("\u0085", "\n")
)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L200-L249
project: 44
executed_lines: [0, 42, 43, 44, 45, 46, 47, 48, 49] | executed_lines_pc: 18
missing_lines: [] | missing_lines_pc: 0
covered: false | filecoverage: 85.897436
function_lines: 50 | mccabe: 1 | coverage: 100 | docstring_lines: 41
function_nodoc:
def fix_line_breaks(text):
r"""
Convert all line breaks to Unix style.
This will convert the following sequences into the standard \\n
line break:
- CRLF (\\r\\n), used on Windows and in some communication protocols
- CR (\\r), once used on Mac OS Classic, and now kept alive by misguided
software such as Microsoft Office for Mac
- LINE SEPARATOR (\\u2028) and PARAGRAPH SEPARATOR (\\u2029), defined by
Unicode and used to sow confusion and discord
- NEXT LINE (\\x85), a C1 control character that is certainly not what you
meant
The NEXT LINE character is a bit of an odd case, because it
usually won't show up if `fix_encoding` is also being run.
\\x85 is very common mojibake for \\u2026, HORIZONTAL ELLIPSIS.
>>> print(fix_line_breaks(
... "This string is made of two things:\u2029"
... "1. Unicode\u2028"
... "2. Spite"
... ))
This string is made of two things:
1. Unicode
2. Spite
For further testing and examples, let's define a function to make sure
we can see the control characters in their escaped form:
>>> def eprint(text):
... print(text.encode('unicode-escape').decode('ascii'))
>>> eprint(fix_line_breaks("Content-type: text/plain\r\n\r\nHi."))
Content-type: text/plain\n\nHi.
>>> eprint(fix_line_breaks("This is how Microsoft \r trolls Mac users"))
This is how Microsoft \n trolls Mac users
>>> eprint(fix_line_breaks("What is this \x85 I don't even"))
What is this \n I don't even
"""
return (
text.replace("\r\n", "\n")
.replace("\r", "\n")
.replace("\u2028", "\n")
.replace("\u2029", "\n")
.replace("\u0085", "\n")
)
id: 28083
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: convert_surrogate_pair
parameters: (match)
return_statement: return chr(codept)
docstring: Convert a surrogate pair to the single codepoint it represents. This implements the formula described at: http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
docstring_summary: Convert a surrogate pair to the single codepoint it represents.
func_begin: 256 | func_end: 265
function:
def convert_surrogate_pair(match):
"""
Convert a surrogate pair to the single codepoint it represents.
This implements the formula described at:
http://en.wikipedia.org/wiki/Universal_Character_Set_characters#Surrogates
"""
pair = match.group(0)
codept = 0x10000 + (ord(pair[0]) - 0xD800) * 0x400 + (ord(pair[1]) - 0xDC00)
return chr(codept)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L256-L265
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 10 | mccabe: 1 | coverage: 100 | docstring_lines: 4
function_nodoc:
def convert_surrogate_pair(match):
pair = match.group(0)
codept = 0x10000 + (ord(pair[0]) - 0xD800) * 0x400 + (ord(pair[1]) - 0xDC00)
return chr(codept)
id: 28084
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: fix_surrogates
parameters: (text)
return_statement: return text
docstring:
Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
>>> high_surrogate = chr(0xd83d)
>>> low_surrogate = chr(0xdca9)
>>> print(fix_surrogates(high_surrogate + low_surrogate))
💩
>>> print(fix_surrogates(low_surrogate + high_surrogate))
��
The above doctest had to be very carefully written, because even putting
the Unicode escapes of the surrogates in the docstring was causing
various tools to fail, which I think just goes to show why this fixer is
necessary.
docstring_summary:
Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
func_begin: 268 | func_end: 288
function:
def fix_surrogates(text):
"""
Replace 16-bit surrogate codepoints with the characters they represent
(when properly paired), or with \ufffd otherwise.
>>> high_surrogate = chr(0xd83d)
>>> low_surrogate = chr(0xdca9)
>>> print(fix_surrogates(high_surrogate + low_surrogate))
💩
>>> print(fix_surrogates(low_surrogate + high_surrogate))
��
The above doctest had to be very carefully written, because even putting
the Unicode escapes of the surrogates in the docstring was causing
various tools to fail, which I think just goes to show why this fixer is
necessary.
"""
if SURROGATE_RE.search(text):
text = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
text = SURROGATE_RE.sub("\ufffd", text)
return text
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L268-L288
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 21 | mccabe: 2 | coverage: 100 | docstring_lines: 14
function_nodoc:
def fix_surrogates(text):
if SURROGATE_RE.search(text):
text = SURROGATE_PAIR_RE.sub(convert_surrogate_pair, text)
text = SURROGATE_RE.sub("\ufffd", text)
return text
id: 28085
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: remove_control_chars
parameters: (text)
return_statement: return text.translate(CONTROL_CHARS)
docstring:
Remove various control characters that you probably didn't intend to be in
your text. Many of these characters appear in the table of "Characters not
suitable for use with markup" at
http://www.unicode.org/reports/tr20/tr20-9.html.
This includes:
- ASCII control characters, except for the important whitespace characters
(U+00 to U+08, U+0B, U+0E to U+1F, U+7F)
- Deprecated Arabic control characters (U+206A to U+206F)
- Interlinear annotation characters (U+FFF9 to U+FFFB)
- The Object Replacement Character (U+FFFC)
- The byte order mark (U+FEFF)
However, these similar characters are left alone:
- Control characters that produce whitespace (U+09, U+0A, U+0C, U+0D,
U+2028, and U+2029)
- C1 control characters (U+80 to U+9F) -- even though they are basically
never used intentionally, they are important clues about what mojibake
has happened
- Control characters that affect glyph rendering, such as joiners and
right-to-left marks (U+200C to U+200F, U+202A to U+202E)
- Musical notation control characters (U+1D173 to U+1D17A) because wow if
you're using those you probably have a good reason
- Tag characters, because they are now used in emoji sequences such as
"Flag of Wales"
docstring_summary:
Remove various control characters that you probably didn't intend to be in
your text. Many of these characters appear in the table of "Characters not
suitable for use with markup" at
http://www.unicode.org/reports/tr20/tr20-9.html.
func_begin: 291 | func_end: 321
function:
def remove_control_chars(text):
"""
Remove various control characters that you probably didn't intend to be in
your text. Many of these characters appear in the table of "Characters not
suitable for use with markup" at
http://www.unicode.org/reports/tr20/tr20-9.html.
This includes:
- ASCII control characters, except for the important whitespace characters
(U+00 to U+08, U+0B, U+0E to U+1F, U+7F)
- Deprecated Arabic control characters (U+206A to U+206F)
- Interlinear annotation characters (U+FFF9 to U+FFFB)
- The Object Replacement Character (U+FFFC)
- The byte order mark (U+FEFF)
However, these similar characters are left alone:
- Control characters that produce whitespace (U+09, U+0A, U+0C, U+0D,
U+2028, and U+2029)
- C1 control characters (U+80 to U+9F) -- even though they are basically
never used intentionally, they are important clues about what mojibake
has happened
- Control characters that affect glyph rendering, such as joiners and
right-to-left marks (U+200C to U+200F, U+202A to U+202E)
- Musical notation control characters (U+1D173 to U+1D17A) because wow if
you're using those you probably have a good reason
- Tag characters, because they are now used in emoji sequences such as
"Flag of Wales"
"""
return text.translate(CONTROL_CHARS)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L291-L321
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 31 | mccabe: 1 | coverage: 100 | docstring_lines: 27
function_nodoc:
def remove_control_chars(text):
return text.translate(CONTROL_CHARS)
id: 28086
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: remove_bom
parameters: (text)
return_statement: return text.lstrip(chr(0xFEFF))
docstring:
r"""
Remove a byte-order mark that was accidentally decoded as if it were part
of the text.
>>> print(remove_bom(chr(0xfeff) + "Where do you want to go today?"))
Where do you want to go today?
docstring_summary:
r"""
Remove a byte-order mark that was accidentally decoded as if it were part
of the text.
func_begin: 324 | func_end: 332
function:
def remove_bom(text):
r"""
Remove a byte-order mark that was accidentally decoded as if it were part
of the text.
>>> print(remove_bom(chr(0xfeff) + "Where do you want to go today?"))
Where do you want to go today?
"""
return text.lstrip(chr(0xFEFF))
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L324-L332
project: 44
executed_lines: [0] | executed_lines_pc: 11.111111
missing_lines: [8] | missing_lines_pc: 11.111111
covered: false | filecoverage: 85.897436
function_lines: 9 | mccabe: 1 | coverage: 88.888889 | docstring_lines: 6
function_nodoc:
def remove_bom(text):
r"""
Remove a byte-order mark that was accidentally decoded as if it were part
of the text.
>>> print(remove_bom(chr(0xfeff) + "Where do you want to go today?"))
Where do you want to go today?
"""
return text.lstrip(chr(0xFEFF))
id: 28087
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: decode_escapes
parameters: (text)
return_statement: return ESCAPE_SEQUENCE_RE.sub(decode_match, text)
docstring:
r"""
Decode backslashed escape sequences, including \\x, \\u, and \\U character
references, even in the presence of other Unicode.
This function has to be called specifically. It's not run automatically by
ftfy, because escaped text is not necessarily a mistake, and there is no
way to distinguish when it is.
This is what Python's "string-escape" and "unicode-escape" codecs were
meant to do, but in contrast, this actually works. It will decode the
string exactly the same way that the Python interpreter decodes its string
literals.
>>> factoid = '\\u20a1 is the currency symbol for the colón.'
>>> print(factoid[1:])
u20a1 is the currency symbol for the colón.
>>> print(decode_escapes(factoid))
₡ is the currency symbol for the colón.
Even though Python itself can read string literals with a combination of
escapes and literal Unicode -- you're looking at one right now -- the
"unicode-escape" codec doesn't work on literal Unicode. (See
http://stackoverflow.com/a/24519338/773754 for more details.)
Instead, this function searches for just the parts of a string that
represent escape sequences, and decodes them, leaving the rest alone. All
valid escape sequences are made of ASCII characters, and this allows
"unicode-escape" to work correctly.
docstring_summary:
r"""
Decode backslashed escape sequences, including \\x, \\u, and \\U character
references, even in the presence of other Unicode.
func_begin: 349 | func_end: 384
function:
def decode_escapes(text):
r"""
Decode backslashed escape sequences, including \\x, \\u, and \\U character
references, even in the presence of other Unicode.
This function has to be called specifically. It's not run automatically by
ftfy, because escaped text is not necessarily a mistake, and there is no
way to distinguish when it is.
This is what Python's "string-escape" and "unicode-escape" codecs were
meant to do, but in contrast, this actually works. It will decode the
string exactly the same way that the Python interpreter decodes its string
literals.
>>> factoid = '\\u20a1 is the currency symbol for the colón.'
>>> print(factoid[1:])
u20a1 is the currency symbol for the colón.
>>> print(decode_escapes(factoid))
₡ is the currency symbol for the colón.
Even though Python itself can read string literals with a combination of
escapes and literal Unicode -- you're looking at one right now -- the
"unicode-escape" codec doesn't work on literal Unicode. (See
http://stackoverflow.com/a/24519338/773754 for more details.)
Instead, this function searches for just the parts of a string that
represent escape sequences, and decodes them, leaving the rest alone. All
valid escape sequences are made of ASCII characters, and this allows
"unicode-escape" to work correctly.
"""
def decode_match(match):
"Given a regex match, decode the escape sequence it contains."
return codecs.decode(match.group(0), "unicode-escape")
return ESCAPE_SEQUENCE_RE.sub(decode_match, text)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L349-L384
project: 44
executed_lines: [0] | executed_lines_pc: 2.777778
missing_lines: [31, 33, 35] | missing_lines_pc: 8.333333
covered: false | filecoverage: 85.897436
function_lines: 36 | mccabe: 2 | coverage: 91.666667 | docstring_lines: 28
function_nodoc:
def decode_escapes(text):
r"""
Decode backslashed escape sequences, including \\x, \\u, and \\U character
references, even in the presence of other Unicode.
This function has to be called specifically. It's not run automatically by
ftfy, because escaped text is not necessarily a mistake, and there is no
way to distinguish when it is.
This is what Python's "string-escape" and "unicode-escape" codecs were
meant to do, but in contrast, this actually works. It will decode the
string exactly the same way that the Python interpreter decodes its string
literals.
>>> factoid = '\\u20a1 is the currency symbol for the colón.'
>>> print(factoid[1:])
u20a1 is the currency symbol for the colón.
>>> print(decode_escapes(factoid))
₡ is the currency symbol for the colón.
Even though Python itself can read string literals with a combination of
escapes and literal Unicode -- you're looking at one right now -- the
"unicode-escape" codec doesn't work on literal Unicode. (See
http://stackoverflow.com/a/24519338/773754 for more details.)
Instead, this function searches for just the parts of a string that
represent escape sequences, and decodes them, leaving the rest alone. All
valid escape sequences are made of ASCII characters, and this allows
"unicode-escape" to work correctly.
"""
def decode_match(match):
"Given a regex match, decode the escape sequence it contains."
return codecs.decode(match.group(0), "unicode-escape")
return ESCAPE_SEQUENCE_RE.sub(decode_match, text)
id: 28088
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: restore_byte_a0
parameters: (byts)
return_statement: return ALTERED_UTF8_RE.sub(replacement, byts)
docstring:
Some mojibake has been additionally altered by a process that said "hmm,
byte A0, that's basically a space!" and replaced it with an ASCII space.
When the A0 is part of a sequence that we intend to decode as UTF-8,
changing byte A0 to 20 would make it fail to decode.
This process finds sequences that would convincingly decode as UTF-8 if
byte 20 were changed to A0, and puts back the A0. For the purpose of
deciding whether this is a good idea, this step gets a cost of twice
the number of bytes that are changed.
This is used as a step within `fix_encoding`.
docstring_summary:
Some mojibake has been additionally altered by a process that said "hmm,
byte A0, that's basically a space!" and replaced it with an ASCII space.
When the A0 is part of a sequence that we intend to decode as UTF-8,
changing byte A0 to 20 would make it fail to decode.
func_begin: 413 | func_end: 433
function:
def restore_byte_a0(byts):
"""
Some mojibake has been additionally altered by a process that said "hmm,
byte A0, that's basically a space!" and replaced it with an ASCII space.
When the A0 is part of a sequence that we intend to decode as UTF-8,
changing byte A0 to 20 would make it fail to decode.
This process finds sequences that would convincingly decode as UTF-8 if
byte 20 were changed to A0, and puts back the A0. For the purpose of
deciding whether this is a good idea, this step gets a cost of twice
the number of bytes that are changed.
This is used as a step within `fix_encoding`.
"""
byts = A_GRAVE_WORD_RE.sub(b"\xc3\xa0 ", byts)
def replacement(match):
"The function to apply when this regex matches."
return match.group(0).replace(b"\x20", b"\xa0")
return ALTERED_UTF8_RE.sub(replacement, byts)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L413-L433
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 21 | mccabe: 2 | coverage: 100 | docstring_lines: 11
function_nodoc:
def restore_byte_a0(byts):
byts = A_GRAVE_WORD_RE.sub(b"\xc3\xa0 ", byts)
def replacement(match):
"The function to apply when this regex matches."
return match.group(0).replace(b"\x20", b"\xa0")
return ALTERED_UTF8_RE.sub(replacement, byts)
id: 28089
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: replace_lossy_sequences
parameters: (byts)
return_statement: return LOSSY_UTF8_RE.sub("\ufffd".encode("utf-8"), byts)
docstring:
This function identifies sequences where information has been lost in
a "sloppy" codec, indicated by byte 1A, and if they would otherwise look
like a UTF-8 sequence, it replaces them with the UTF-8 sequence for U+FFFD.
A further explanation:
ftfy can now fix text in a few cases that it would previously fix
incompletely, because of the fact that it can't successfully apply the fix
to the entire string. A very common case of this is when characters have
been erroneously decoded as windows-1252, but instead of the "sloppy"
windows-1252 that passes through unassigned bytes, the unassigned bytes get
turned into U+FFFD (�), so we can't tell what they were.
This most commonly happens with curly quotation marks that appear
``â€œ like this â€�``.
We can do better by building on ftfy's "sloppy codecs" to let them handle
less-sloppy but more-lossy text. When they encounter the character ``�``,
instead of refusing to encode it, they encode it as byte 1A -- an
ASCII control code called SUBSTITUTE that once was meant for about the same
purpose. We can then apply a fixer that looks for UTF-8 sequences where
some continuation bytes have been replaced by byte 1A, and decode the whole
sequence as �; if that doesn't work, it'll just turn the byte back into �
itself.
As a result, the above text ``â€œ like this â€�`` will decode as
``“ like this �``.
If U+1A was actually in the original string, then the sloppy codecs will
not be used, and this function will not be run, so your weird control
character will be left alone but wacky fixes like this won't be possible.
This is used as a transcoder within `fix_encoding`.
docstring_summary:
This function identifies sequences where information has been lost in
a "sloppy" codec, indicated by byte 1A, and if they would otherwise look
like a UTF-8 sequence, it replaces them with the UTF-8 sequence for U+FFFD.
func_begin: 436 | func_end: 472
function:
def replace_lossy_sequences(byts):
"""
This function identifies sequences where information has been lost in
a "sloppy" codec, indicated by byte 1A, and if they would otherwise look
like a UTF-8 sequence, it replaces them with the UTF-8 sequence for U+FFFD.
A further explanation:
ftfy can now fix text in a few cases that it would previously fix
incompletely, because of the fact that it can't successfully apply the fix
to the entire string. A very common case of this is when characters have
been erroneously decoded as windows-1252, but instead of the "sloppy"
windows-1252 that passes through unassigned bytes, the unassigned bytes get
turned into U+FFFD (�), so we can't tell what they were.
This most commonly happens with curly quotation marks that appear
``â€œ like this â€�``.
We can do better by building on ftfy's "sloppy codecs" to let them handle
less-sloppy but more-lossy text. When they encounter the character ``�``,
instead of refusing to encode it, they encode it as byte 1A -- an
ASCII control code called SUBSTITUTE that once was meant for about the same
purpose. We can then apply a fixer that looks for UTF-8 sequences where
some continuation bytes have been replaced by byte 1A, and decode the whole
sequence as �; if that doesn't work, it'll just turn the byte back into �
itself.
As a result, the above text ``â€œ like this â€�`` will decode as
``“ like this �``.
If U+1A was actually in the original string, then the sloppy codecs will
not be used, and this function will not be run, so your weird control
character will be left alone but wacky fixes like this won't be possible.
This is used as a transcoder within `fix_encoding`.
"""
return LOSSY_UTF8_RE.sub("\ufffd".encode("utf-8"), byts)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L436-L472
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 37 | mccabe: 1 | coverage: 100 | docstring_lines: 33
function_nodoc:
def replace_lossy_sequences(byts):
return LOSSY_UTF8_RE.sub("\ufffd".encode("utf-8"), byts)
id: 28090
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: decode_inconsistent_utf8
parameters: (text)
return_statement: return UTF8_DETECTOR_RE.sub(fix_embedded_mojibake, text)
docstring: Sometimes, text from one encoding ends up embedded within text from a different one. This is common enough that we need to be able to fix it. This is used as a transcoder within `fix_encoding`.
docstring_summary: Sometimes, text from one encoding ends up embedded within text from a different one. This is common enough that we need to be able to fix it.
func_begin: 475 | func_end: 492
function:
def decode_inconsistent_utf8(text):
"""
Sometimes, text from one encoding ends up embedded within text from a
different one. This is common enough that we need to be able to fix it.
This is used as a transcoder within `fix_encoding`.
"""
def fix_embedded_mojibake(match):
substr = match.group(0)
# Require the match to be shorter, so that this doesn't recurse infinitely
if len(substr) < len(text) and is_bad(substr):
return ftfy.fix_encoding(substr)
else:
return substr
return UTF8_DETECTOR_RE.sub(fix_embedded_mojibake, text)
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L475-L492
project: 44
executed_lines: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 18 | mccabe: 4 | coverage: 100 | docstring_lines: 4
function_nodoc:
def decode_inconsistent_utf8(text):
def fix_embedded_mojibake(match):
substr = match.group(0)
# Require the match to be shorter, so that this doesn't recurse infinitely
if len(substr) < len(text) and is_bad(substr):
return ftfy.fix_encoding(substr)
else:
return substr
return UTF8_DETECTOR_RE.sub(fix_embedded_mojibake, text)
id: 28091
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: _c1_fixer
parameters: (match)
return_statement: return match.group(0).encode("latin-1").decode("sloppy-windows-1252")
func_begin: 495 | func_end: 496
function:
def _c1_fixer(match):
return match.group(0).encode("latin-1").decode("sloppy-windows-1252")
url:
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L495-L496
project: 44
executed_lines: [0, 1] | executed_lines_pc: 100
missing_lines: [] | missing_lines_pc: 0
covered: true | filecoverage: 85.897436
function_lines: 2 | mccabe: 1 | coverage: 100 | docstring_lines: 0
function_nodoc:
def _c1_fixer(match):
return match.group(0).encode("latin-1").decode("sloppy-windows-1252")
id: 28092
---
nwo: rspeer/python-ftfy
sha: fbb4570df343757c36ac2a86142c3cc5aaea2746
path: ftfy/fixes.py
identifier: fix_c1_controls
parameters: (text)
return_statement: return C1_CONTROL_RE.sub(_c1_fixer, text)
docstring: If text still contains C1 control characters, treat them as their Windows-1252 equivalents. This matches what Web browsers do.
docstring_summary: If text still contains C1 control characters, treat them as their Windows-1252 equivalents. This matches what Web browsers do.
func_begin: 499 | func_end: 504
function:
def fix_c1_controls(text):
"""
If text still contains C1 control characters, treat them as their
Windows-1252 equivalents. This matches what Web browsers do.
"""
return C1_CONTROL_RE.sub(_c1_fixer, text)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/fixes.py#L499-L504
| 44 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 85.897436 | 6 | 1 | 100 | 2 |
def fix_c1_controls(text):
return C1_CONTROL_RE.sub(_c1_fixer, text)
| 28,093 |
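A sketch of `fix_c1_controls`, which drives the `_c1_fixer` helper in the previous record; it assumes that importing ftfy registers the `sloppy-windows-1252` codec the helper decodes with:

import ftfy  # noqa: F401 -- assumed to register the "sloppy-*" codecs
from ftfy import fixes

# U+0085 is a C1 control character; byte 0x85 in Windows-1252 is "…".
print(fixes.fix_c1_controls("wait\x85 what?"))  # expected: 'wait… what?'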
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/badness.py
|
sequence_weirdness
|
(text: str)
|
return badness(text)
|
This was the name of the heuristic used in ftfy 2.x through 5.x. As an
attempt at compatibility with external code that calls the heuristic
directly, we redirect to our new heuristic, :func:`badness`.
|
This was the name of the heuristic used in ftfy 2.x through 5.x. As an
attempt at compatibility with external code that calls the heuristic
directly, we redirect to our new heuristic, :func:`badness`.
| 362 | 372 |
def sequence_weirdness(text: str) -> int:
"""
This was the name of the heuristic used in ftfy 2.x through 5.x. As an
attempt at compatibility with external code that calls the heuristic
directly, we redirect to our new heuristic, :func:`badness`.
"""
warnings.warn(
"`sequence_weirdness()` is an old heuristic, and the current "
"closest equivalent is `ftfy.badness.badness()`"
)
return badness(text)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/badness.py#L362-L372
| 44 |
[
0,
1,
2,
3,
4,
5
] | 54.545455 |
[
6,
10
] | 18.181818 | false | 72.727273 | 11 | 1 | 81.818182 | 3 |
def sequence_weirdness(text: str) -> int:
warnings.warn(
"`sequence_weirdness()` is an old heuristic, and the current "
"closest equivalent is `ftfy.badness.badness()`"
)
return badness(text)
| 28,094 |
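A sketch showing that the deprecated alias above still returns a score but warns first; assumes ftfy is installed:

import warnings

from ftfy.badness import sequence_weirdness

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    score = sequence_weirdness("clean text")

print(score)              # expected: 0 for unremarkable text
print(caught[0].message)  # the redirection warning quoted in the record above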
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/badness.py
|
badness
|
(text: str)
|
return len(BADNESS_RE.findall(text))
|
Get the 'badness' of a sequence of text, counting the number of unlikely
character sequences. A badness greater than 0 indicates that some of it
seems to be mojibake.
|
Get the 'badness' of a sequence of text, counting the number of unlikely
character sequences. A badness greater than 0 indicates that some of it
seems to be mojibake.
| 375 | 381 |
def badness(text: str) -> int:
"""
Get the 'badness' of a sequence of text, counting the number of unlikely
character sequences. A badness greater than 0 indicates that some of it
seems to be mojibake.
"""
return len(BADNESS_RE.findall(text))
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/badness.py#L375-L381
| 44 |
[
0,
1,
2,
3,
4,
5
] | 85.714286 |
[
6
] | 14.285714 | false | 72.727273 | 7 | 1 | 85.714286 | 3 |
def badness(text: str) -> int:
return len(BADNESS_RE.findall(text))
| 28,095 |
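A sketch of `badness` on clean text versus a classic mojibake sequence; the inputs were chosen for this note and the expected scores are hedged:

from ftfy.badness import badness

print(badness("This text is fine."))       # expected: 0
print(badness("don\u00e2\u20ac\u2122t"))   # "donâ€™t"; expected: > 0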
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/badness.py
|
is_bad
|
(text: str)
|
return bool(BADNESS_RE.search(text))
|
Returns true iff the given text looks like it contains mojibake.
This can be faster than `badness`, because it returns when the first match
is found to a regex instead of counting matches. Note that as strings get
longer, they have a higher chance of returning True for `is_bad(string)`.
|
Returns true iff the given text looks like it contains mojibake.
| 384 | 392 |
def is_bad(text: str) -> bool:
"""
Returns true iff the given text looks like it contains mojibake.
This can be faster than `badness`, because it returns when the first match
is found to a regex instead of counting matches. Note that as strings get
longer, they have a higher chance of returning True for `is_bad(string)`.
"""
return bool(BADNESS_RE.search(text))
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/badness.py#L384-L392
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8
] | 100 |
[] | 0 | true | 72.727273 | 9 | 1 | 100 | 5 |
def is_bad(text: str) -> bool:
return bool(BADNESS_RE.search(text))
| 28,096 |
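A sketch contrasting `is_bad` on legitimate non-ASCII text and on mojibake; same caveats as above:

from ftfy.badness import is_bad

print(is_bad("na\u00efve"))        # "naïve" is legitimate; expected: False
print(is_bad("na\u00c3\u00afve"))  # "naÃ¯ve" is mojibake; expected: True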
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/formatting.py
|
character_width
|
(char: str)
|
return wcwidth(char)
|
r"""
Determine the width that a character is likely to be displayed as in
a monospaced terminal. The width for a printable character will
always be 0, 1, or 2.
Nonprintable or control characters will return -1, a convention that comes
from wcwidth.
>>> character_width('車')
2
>>> character_width('A')
1
>>> character_width('\N{ZERO WIDTH JOINER}')
0
>>> character_width('\n')
-1
|
r"""
Determine the width that a character is likely to be displayed as in
a monospaced terminal. The width for a printable character will
always be 0, 1, or 2.
| 14 | 32 |
def character_width(char: str) -> int:
r"""
Determine the width that a character is likely to be displayed as in
a monospaced terminal. The width for a printable character will
always be 0, 1, or 2.
Nonprintable or control characters will return -1, a convention that comes
from wcwidth.
>>> character_width('車')
2
>>> character_width('A')
1
>>> character_width('\N{ZERO WIDTH JOINER}')
0
>>> character_width('\n')
-1
"""
return wcwidth(char)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/formatting.py#L14-L32
| 44 |
[
0
] | 5.263158 |
[
18
] | 5.263158 | false | 24.242424 | 19 | 1 | 94.736842 | 16 |
def character_width(char: str) -> int:
r"""
Determine the width that a character is likely to be displayed as in
a monospaced terminal. The width for a printable character will
always be 0, 1, or 2.
Nonprintable or control characters will return -1, a convention that comes
from wcwidth.
>>> character_width('車')
2
>>> character_width('A')
1
>>> character_width('\N{ZERO WIDTH JOINER}')
0
>>> character_width('\n')
-1
"""
return wcwidth(char)
| 28,097 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/formatting.py
|
monospaced_width
|
(text: str)
|
return wcswidth(remove_terminal_escapes(normalize("NFC", text)))
|
r"""
Return the number of character cells that this string is likely to occupy
when displayed in a monospaced, modern, Unicode-aware terminal emulator.
We refer to this as the "display width" of the string.
This can be useful for formatting text that may contain non-spacing
characters, or CJK characters that take up two character cells.
Returns -1 if the string contains a non-printable or control character.
>>> monospaced_width('ちゃぶ台返し')
12
>>> len('ちゃぶ台返し')
6
>>> monospaced_width('owl\N{SOFT HYPHEN}flavored')
12
>>> monospaced_width('example\x80')
-1
A more complex example: The Korean word 'ibnida' can be written with 3
pre-composed characters or 7 jamo. Either way, it *looks* the same and
takes up 6 character cells.
>>> monospaced_width('입니다')
6
>>> monospaced_width('\u110b\u1175\u11b8\u1102\u1175\u1103\u1161')
6
The word "blue" with terminal escapes to make it blue still takes up only
4 characters, when shown as intended.
>>> monospaced_width('\x1b[34mblue\x1b[m')
4
|
r"""
Return the number of character cells that this string is likely to occupy
when displayed in a monospaced, modern, Unicode-aware terminal emulator.
We refer to this as the "display width" of the string.
| 35 | 74 |
def monospaced_width(text: str) -> int:
r"""
Return the number of character cells that this string is likely to occupy
when displayed in a monospaced, modern, Unicode-aware terminal emulator.
We refer to this as the "display width" of the string.
This can be useful for formatting text that may contain non-spacing
characters, or CJK characters that take up two character cells.
Returns -1 if the string contains a non-printable or control character.
>>> monospaced_width('ちゃぶ台返し')
12
>>> len('ちゃぶ台返し')
6
>>> monospaced_width('owl\N{SOFT HYPHEN}flavored')
12
>>> monospaced_width('example\x80')
-1
A more complex example: The Korean word 'ibnida' can be written with 3
pre-composed characters or 7 jamo. Either way, it *looks* the same and
takes up 6 character cells.
>>> monospaced_width('입니다')
6
>>> monospaced_width('\u110b\u1175\u11b8\u1102\u1175\u1103\u1161')
6
The word "blue" with terminal escapes to make it blue still takes up only
4 characters, when shown as intended.
>>> monospaced_width('\x1b[34mblue\x1b[m')
4
"""
# NFC-normalize the text first, so that we don't need special cases for
# Hangul jamo.
#
# Remove terminal escapes before calculating width, because if they are
# displayed as intended, they will have zero width.
return wcswidth(remove_terminal_escapes(normalize("NFC", text)))
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/formatting.py#L35-L74
| 44 |
[
0
] | 2.5 |
[
39
] | 2.5 | false | 24.242424 | 40 | 1 | 97.5 | 32 |
def monospaced_width(text: str) -> int:
r"""
Return the number of character cells that this string is likely to occupy
when displayed in a monospaced, modern, Unicode-aware terminal emulator.
We refer to this as the "display width" of the string.
This can be useful for formatting text that may contain non-spacing
characters, or CJK characters that take up two character cells.
Returns -1 if the string contains a non-printable or control character.
>>> monospaced_width('ちゃぶ台返し')
12
>>> len('ちゃぶ台返し')
6
>>> monospaced_width('owl\N{SOFT HYPHEN}flavored')
12
>>> monospaced_width('example\x80')
-1
A more complex example: The Korean word 'ibnida' can be written with 3
pre-composed characters or 7 jamo. Either way, it *looks* the same and
takes up 6 character cells.
>>> monospaced_width('입니다')
6
>>> monospaced_width('\u110b\u1175\u11b8\u1102\u1175\u1103\u1161')
6
The word "blue" with terminal escapes to make it blue still takes up only
4 characters, when shown as intended.
>>> monospaced_width('\x1b[34mblue\x1b[m')
4
"""
# NFC-normalize the text first, so that we don't need special cases for
# Hangul jamo.
#
# Remove terminal escapes before calculating width, because if they are
# displayed as intended, they will have zero width.
return wcswidth(remove_terminal_escapes(normalize("NFC", text)))
| 28,098 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/formatting.py
|
display_ljust
|
(text, width, fillchar=" ")
|
return text + fillchar * padding
|
Return `text` left-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
"Left" here means toward the beginning of the string, which may actually
appear on the right in an RTL context. This is similar to the use of the
word "left" in "left parenthesis".
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_ljust(line, 20, '▒'))
Table flip▒▒▒▒▒▒▒▒▒▒
(╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
ちゃぶ台返し▒▒▒▒▒▒▒▒
This example, and the similar ones that follow, should come out justified
correctly when viewed in a monospaced terminal. It will probably not look
correct if you're viewing this code or documentation in a Web browser.
|
Return `text` left-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
| 77 | 108 |
def display_ljust(text, width, fillchar=" "):
"""
Return `text` left-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
"Left" here means toward the beginning of the string, which may actually
appear on the right in an RTL context. This is similar to the use of the
word "left" in "left parenthesis".
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_ljust(line, 20, '▒'))
Table flip▒▒▒▒▒▒▒▒▒▒
(╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
ちゃぶ台返し▒▒▒▒▒▒▒▒
This example, and the similar ones that follow, should come out justified
correctly when viewed in a monospaced terminal. It will probably not look
correct if you're viewing this code or documentation in a Web browser.
"""
if character_width(fillchar) != 1:
raise ValueError("The padding character must have display width 1")
text_width = monospaced_width(text)
if text_width == -1:
# There's a control character here, so just don't add padding
return text
padding = max(0, width - text_width)
return text + fillchar * padding
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/formatting.py#L77-L108
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21
] | 68.75 |
[
22,
23,
25,
26,
28,
30,
31
] | 21.875 | false | 24.242424 | 32 | 3 | 78.125 | 19 |
def display_ljust(text, width, fillchar=" "):
if character_width(fillchar) != 1:
raise ValueError("The padding character must have display width 1")
text_width = monospaced_width(text)
if text_width == -1:
# There's a control character here, so just don't add padding
return text
padding = max(0, width - text_width)
return text + fillchar * padding
| 28,099 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/formatting.py
|
display_rjust
|
(text, width, fillchar=" ")
|
return fillchar * padding + text
|
Return `text` right-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
"Right" here means toward the end of the string, which may actually be on
the left in an RTL context. This is similar to the use of the word "right"
in "right parenthesis".
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_rjust(line, 20, '▒'))
▒▒▒▒▒▒▒▒▒▒Table flip
▒▒▒▒▒▒▒(╯°□°)╯︵ ┻━┻
▒▒▒▒▒▒▒▒ちゃぶ台返し
|
Return `text` right-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
| 111 | 137 |
def display_rjust(text, width, fillchar=" "):
"""
Return `text` right-justified in a Unicode string whose display width,
in a monospaced terminal, should be at least `width` character cells.
The rest of the string will be padded with `fillchar`, which must be
a width-1 character.
"Right" here means toward the end of the string, which may actually be on
the left in an RTL context. This is similar to the use of the word "right"
in "right parenthesis".
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_rjust(line, 20, '▒'))
▒▒▒▒▒▒▒▒▒▒Table flip
▒▒▒▒▒▒▒(╯°□°)╯︵ ┻━┻
▒▒▒▒▒▒▒▒ちゃぶ台返し
"""
if character_width(fillchar) != 1:
raise ValueError("The padding character must have display width 1")
text_width = monospaced_width(text)
if text_width == -1:
return text
padding = max(0, width - text_width)
return fillchar * padding + text
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/formatting.py#L111-L137
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 66.666667 |
[
18,
19,
21,
22,
23,
25,
26
] | 25.925926 | false | 24.242424 | 27 | 3 | 74.074074 | 15 |
def display_rjust(text, width, fillchar=" "):
if character_width(fillchar) != 1:
raise ValueError("The padding character must have display width 1")
text_width = monospaced_width(text)
if text_width == -1:
return text
padding = max(0, width - text_width)
return fillchar * padding + text
| 28,100 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/formatting.py
|
display_center
|
(text, width, fillchar=" ")
|
return fillchar * left_padding + text + fillchar * right_padding
|
Return `text` centered in a Unicode string whose display width, in a
monospaced terminal, should be at least `width` character cells. The rest
of the string will be padded with `fillchar`, which must be a width-1
character.
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_center(line, 20, '▒'))
▒▒▒▒▒Table flip▒▒▒▒▒
▒▒▒(╯°□°)╯︵ ┻━┻▒▒▒▒
▒▒▒▒ちゃぶ台返し▒▒▒▒
|
Return `text` centered in a Unicode string whose display width, in a
monospaced terminal, should be at least `width` character cells. The rest
of the string will be padded with `fillchar`, which must be a width-1
character.
| 140 | 164 |
def display_center(text, width, fillchar=" "):
"""
Return `text` centered in a Unicode string whose display width, in a
monospaced terminal, should be at least `width` character cells. The rest
of the string will be padded with `fillchar`, which must be a width-1
character.
>>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
>>> for line in lines:
... print(display_center(line, 20, '▒'))
▒▒▒▒▒Table flip▒▒▒▒▒
▒▒▒(╯°□°)╯︵ ┻━┻▒▒▒▒
▒▒▒▒ちゃぶ台返し▒▒▒▒
"""
if character_width(fillchar) != 1:
raise ValueError("The padding character must have display width 1")
text_width = monospaced_width(text)
if text_width == -1:
return text
padding = max(0, width - text_width)
left_padding = padding // 2
right_padding = padding - left_padding
return fillchar * left_padding + text + fillchar * right_padding
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/formatting.py#L140-L164
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13
] | 56 |
[
14,
15,
17,
18,
19,
21,
22,
23,
24
] | 36 | false | 24.242424 | 25 | 3 | 64 | 11 |
def display_center(text, width, fillchar=" "):
if character_width(fillchar) != 1:
raise ValueError("The padding character must have display width 1")
text_width = monospaced_width(text)
if text_width == -1:
return text
padding = max(0, width - text_width)
left_padding = padding // 2
right_padding = padding - left_padding
return fillchar * left_padding + text + fillchar * right_padding
| 28,101 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
_config_from_kwargs
|
(
config: TextFixerConfig, kwargs: Dict[str, Any]
)
|
return config
|
Handle parameters provided as keyword arguments to ftfy's top-level
functions, converting them into a TextFixerConfig.
|
Handle parameters provided as keyword arguments to ftfy's top-level
functions, converting them into a TextFixerConfig.
| 231 | 246 |
def _config_from_kwargs(
config: TextFixerConfig, kwargs: Dict[str, Any]
) -> TextFixerConfig:
"""
Handle parameters provided as keyword arguments to ftfy's top-level
functions, converting them into a TextFixerConfig.
"""
if "fix_entities" in kwargs:
warnings.warn(
"`fix_entities` has been renamed to `unescape_html`", DeprecationWarning
)
kwargs = kwargs.copy()
kwargs["unescape_html"] = kwargs["fix_entities"]
del kwargs["fix_entities"]
config = config._replace(**kwargs)
return config
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L231-L246
| 44 |
[
0,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15
] | 68.75 |
[] | 0 | false | 87.553648 | 16 | 2 | 100 | 2 |
def _config_from_kwargs(
config: TextFixerConfig, kwargs: Dict[str, Any]
) -> TextFixerConfig:
if "fix_entities" in kwargs:
warnings.warn(
"`fix_entities` has been renamed to `unescape_html`", DeprecationWarning
)
kwargs = kwargs.copy()
kwargs["unescape_html"] = kwargs["fix_entities"]
del kwargs["fix_entities"]
config = config._replace(**kwargs)
return config
| 28,102 |
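A sketch of the helper above; note that `_config_from_kwargs` is module-private, so this leans on an ftfy implementation detail rather than public API:

from ftfy import TextFixerConfig, _config_from_kwargs

# Keyword arguments override the matching TextFixerConfig fields.
config = _config_from_kwargs(TextFixerConfig(), {"uncurl_quotes": False})
print(config.uncurl_quotes)  # expected: False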
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
_try_fix
|
(
fixer_name: str,
text: str,
config: TextFixerConfig,
steps: Optional[List[ExplanationStep]],
)
|
return text
|
A helper function used across several 'fixer' steps, deciding whether to
apply the fix and whether to record the fix in `steps`.
|
A helper function used across several 'fixer' steps, deciding whether to
apply the fix and whether to record the fix in `steps`.
| 269 | 286 |
def _try_fix(
fixer_name: str,
text: str,
config: TextFixerConfig,
steps: Optional[List[ExplanationStep]],
) -> str:
"""
A helper function used across several 'fixer' steps, deciding whether to
apply the fix and whether to record the fix in `steps`.
"""
if getattr(config, fixer_name):
fixer = FIXERS[fixer_name]
fixed = fixer(text)
if steps is not None and fixed != text:
steps.append(ExplanationStep("apply", fixer_name))
return cast(str, fixed)
return text
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L269-L286
| 44 |
[
0,
9,
10,
11,
12,
13,
14,
15,
16,
17
] | 55.555556 |
[] | 0 | false | 87.553648 | 18 | 4 | 100 | 2 |
def _try_fix(
fixer_name: str,
text: str,
config: TextFixerConfig,
steps: Optional[List[ExplanationStep]],
) -> str:
if getattr(config, fixer_name):
fixer = FIXERS[fixer_name]
fixed = fixer(text)
if steps is not None and fixed != text:
steps.append(ExplanationStep("apply", fixer_name))
return cast(str, fixed)
return text
| 28,103 |
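A sketch of `_try_fix` applying one named fixer and recording an explanation step; like the previous helper it is private API, and it assumes "uncurl_quotes" is a key in ftfy's `FIXERS` registry:

from ftfy import TextFixerConfig, _try_fix

steps = []
fixed = _try_fix("uncurl_quotes", "don\u2019t", TextFixerConfig(), steps)
print(fixed)  # expected: "don't"
print(steps)  # expected: a single ("apply", "uncurl_quotes") step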
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
fix_text
|
(text: str, config: Optional[TextFixerConfig] = None, **kwargs)
|
return "".join(out)
|
r"""
Given Unicode text as input, fix inconsistencies and glitches in it,
such as mojibake (text that was decoded in the wrong encoding).
Let's start with some examples:
>>> fix_text('âœ” No problems')
'✔ No problems'
>>> print(fix_text("¯\\_(ã\x83\x84)_/¯"))
¯\_(ツ)_/¯
>>> fix_text('Broken text&hellip; it&rsquo;s flubberific!')
"Broken text... it's flubberific!"
>>> fix_text('ＬＯＵＤ ＮＯＩＳＥＳ')
'LOUD NOISES'
ftfy applies a number of different fixes to the text, and can accept
configuration to select which fixes to apply.
The configuration takes the form of a :class:`TextFixerConfig` object,
and you can see a description of the options in that class's docstring
or in the full documentation at ftfy.readthedocs.org.
For convenience and backward compatibility, the configuration can also
take the form of keyword arguments, which will set the equivalently-named
fields of the TextFixerConfig object.
For example, here are two ways to fix text but skip the "uncurl_quotes"
step::
fix_text(text, TextFixerConfig(uncurl_quotes=False))
fix_text(text, uncurl_quotes=False)
This function fixes text in independent segments, which are usually lines
of text, or arbitrarily broken up every 1 million codepoints (configurable
with `config.max_decode_length`) if there aren't enough line breaks. The
bound on segment lengths helps to avoid unbounded slowdowns.
ftfy can also provide an 'explanation', a list of transformations it applied
to the text that would fix more text like it. This function doesn't provide
explanations (because there may be different fixes for different segments
of text).
To get an explanation, use the :func:`fix_and_explain()` function, which
fixes the string in one segment and explains what it fixed.
|
r"""
Given Unicode text as input, fix inconsistencies and glitches in it,
such as mojibake (text that was decoded in the wrong encoding).
| 289 | 360 |
def fix_text(text: str, config: Optional[TextFixerConfig] = None, **kwargs) -> str:
r"""
Given Unicode text as input, fix inconsistencies and glitches in it,
such as mojibake (text that was decoded in the wrong encoding).
Let's start with some examples:
>>> fix_text('âœ” No problems')
'✔ No problems'
>>> print(fix_text("¯\\_(ã\x83\x84)_/¯"))
¯\_(ツ)_/¯
>>> fix_text('Broken text&hellip; it&rsquo;s flubberific!')
"Broken text... it's flubberific!"
>>> fix_text('ＬＯＵＤ ＮＯＩＳＥＳ')
'LOUD NOISES'
ftfy applies a number of different fixes to the text, and can accept
configuration to select which fixes to apply.
The configuration takes the form of a :class:`TextFixerConfig` object,
and you can see a description of the options in that class's docstring
or in the full documentation at ftfy.readthedocs.org.
For convenience and backward compatibility, the configuration can also
take the form of keyword arguments, which will set the equivalently-named
fields of the TextFixerConfig object.
For example, here are two ways to fix text but skip the "uncurl_quotes"
step::
fix_text(text, TextFixerConfig(uncurl_quotes=False))
fix_text(text, uncurl_quotes=False)
This function fixes text in independent segments, which are usually lines
of text, or arbitrarily broken up every 1 million codepoints (configurable
with `config.max_decode_length`) if there aren't enough line breaks. The
bound on segment lengths helps to avoid unbounded slowdowns.
ftfy can also provide an 'explanation', a list of transformations it applied
to the text that would fix more text like it. This function doesn't provide
explanations (because there may be different fixes for different segments
of text).
To get an explanation, use the :func:`fix_and_explain()` function, which
fixes the string in one segment and explains what it fixed.
"""
if config is None:
config = TextFixerConfig(explain=False)
config = _config_from_kwargs(config, kwargs)
if isinstance(text, bytes):
raise UnicodeError(BYTES_ERROR_TEXT)
out = []
pos = 0
while pos < len(text):
textbreak = text.find("\n", pos) + 1
if textbreak == 0:
textbreak = len(text)
if (textbreak - pos) > config.max_decode_length:
textbreak = pos + config.max_decode_length
segment = text[pos:textbreak]
if config.unescape_html == "auto" and "<" in segment:
config = config._replace(unescape_html=False)
fixed_segment, _ = fix_and_explain(segment, config)
out.append(fixed_segment)
pos = textbreak
return "".join(out)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L289-L360
| 44 |
[
0,
49,
50,
51,
52,
53,
55,
56,
57,
58,
59,
60,
61,
62,
64,
65,
66,
67,
68,
69,
70,
71
] | 30.555556 |
[
54,
63
] | 2.777778 | false | 87.553648 | 72 | 8 | 97.222222 | 47 |
def fix_text(text: str, config: Optional[TextFixerConfig] = None, **kwargs) -> str:
r"""
Given Unicode text as input, fix inconsistencies and glitches in it,
such as mojibake (text that was decoded in the wrong encoding).
Let's start with some examples:
>>> fix_text('âœ” No problems')
'✔ No problems'
>>> print(fix_text("¯\\_(ã\x83\x84)_/¯"))
¯\_(ツ)_/¯
>>> fix_text('Broken text&hellip; it&rsquo;s flubberific!')
"Broken text... it's flubberific!"
>>> fix_text('ＬＯＵＤ ＮＯＩＳＥＳ')
'LOUD NOISES'
ftfy applies a number of different fixes to the text, and can accept
configuration to select which fixes to apply.
The configuration takes the form of a :class:`TextFixerConfig` object,
and you can see a description of the options in that class's docstring
or in the full documentation at ftfy.readthedocs.org.
For convenience and backward compatibility, the configuration can also
take the form of keyword arguments, which will set the equivalently-named
fields of the TextFixerConfig object.
For example, here are two ways to fix text but skip the "uncurl_quotes"
step::
fix_text(text, TextFixerConfig(uncurl_quotes=False))
fix_text(text, uncurl_quotes=False)
This function fixes text in independent segments, which are usually lines
of text, or arbitrarily broken up every 1 million codepoints (configurable
with `config.max_decode_length`) if there aren't enough line breaks. The
bound on segment lengths helps to avoid unbounded slowdowns.
ftfy can also provide an 'explanation', a list of transformations it applied
to the text that would fix more text like it. This function doesn't provide
explanations (because there may be different fixes for different segments
of text).
To get an explanation, use the :func:`fix_and_explain()` function, which
fixes the string in one segment and explains what it fixed.
"""
if config is None:
config = TextFixerConfig(explain=False)
config = _config_from_kwargs(config, kwargs)
if isinstance(text, bytes):
raise UnicodeError(BYTES_ERROR_TEXT)
out = []
pos = 0
while pos < len(text):
textbreak = text.find("\n", pos) + 1
if textbreak == 0:
textbreak = len(text)
if (textbreak - pos) > config.max_decode_length:
textbreak = pos + config.max_decode_length
segment = text[pos:textbreak]
if config.unescape_html == "auto" and "<" in segment:
config = config._replace(unescape_html=False)
fixed_segment, _ = fix_and_explain(segment, config)
out.append(fixed_segment)
pos = textbreak
return "".join(out)
| 28,104 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
fix_and_explain
|
(
text: str, config: Optional[TextFixerConfig] = None, **kwargs
)
|
Fix text as a single segment, returning the fixed text and an explanation
of what was fixed.
The explanation is a list of steps that can be applied with
:func:`apply_plan`, or if config.explain is False, it will be None.
|
Fix text as a single segment, returning the fixed text and an explanation
of what was fixed.
| 363 | 420 |
def fix_and_explain(
text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> ExplainedText:
"""
Fix text as a single segment, returning the fixed text and an explanation
of what was fixed.
The explanation is a list of steps that can be applied with
:func:`apply_plan`, or if config.explain is False, it will be None.
"""
if config is None:
config = TextFixerConfig()
if isinstance(text, bytes):
raise UnicodeError(BYTES_ERROR_TEXT)
config = _config_from_kwargs(config, kwargs)
if config.unescape_html == "auto" and "<" in text:
config = config._replace(unescape_html=False)
if config.explain:
steps: Optional[List[ExplanationStep]] = []
else:
# If explanations aren't desired, `steps` will be None
steps = None
while True:
origtext = text
text = _try_fix("unescape_html", text, config, steps)
if config.fix_encoding:
if steps is None:
text = fix_encoding(text)
else:
text, encoding_steps = fix_encoding_and_explain(text, config)
if encoding_steps is not None:
steps.extend(encoding_steps)
for fixer in [
"fix_c1_controls",
"fix_latin_ligatures",
"fix_character_width",
"uncurl_quotes",
"fix_line_breaks",
"fix_surrogates",
"remove_terminal_escapes",
"remove_control_chars",
]:
text = _try_fix(fixer, text, config, steps)
if config.normalization is not None:
fixed = unicodedata.normalize(config.normalization, text)
if steps is not None and fixed != text:
steps.append(ExplanationStep("normalize", config.normalization))
text = fixed
if text == origtext:
return ExplainedText(text, steps)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L363-L420
| 44 |
[
0,
9,
10,
11,
12,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
54,
55,
56,
57
] | 82.758621 |
[
13,
53
] | 3.448276 | false | 87.553648 | 58 | 15 | 96.551724 | 5 |
def fix_and_explain(
text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> ExplainedText:
if config is None:
config = TextFixerConfig()
if isinstance(text, bytes):
raise UnicodeError(BYTES_ERROR_TEXT)
config = _config_from_kwargs(config, kwargs)
if config.unescape_html == "auto" and "<" in text:
config = config._replace(unescape_html=False)
if config.explain:
steps: Optional[List[ExplanationStep]] = []
else:
# If explanations aren't desired, `steps` will be None
steps = None
while True:
origtext = text
text = _try_fix("unescape_html", text, config, steps)
if config.fix_encoding:
if steps is None:
text = fix_encoding(text)
else:
text, encoding_steps = fix_encoding_and_explain(text, config)
if encoding_steps is not None:
steps.extend(encoding_steps)
for fixer in [
"fix_c1_controls",
"fix_latin_ligatures",
"fix_character_width",
"uncurl_quotes",
"fix_line_breaks",
"fix_surrogates",
"remove_terminal_escapes",
"remove_control_chars",
]:
text = _try_fix(fixer, text, config, steps)
if config.normalization is not None:
fixed = unicodedata.normalize(config.normalization, text)
if steps is not None and fixed != text:
steps.append(ExplanationStep("normalize", config.normalization))
text = fixed
if text == origtext:
return ExplainedText(text, steps)
| 28,105 |
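A sketch of `fix_and_explain` on a classic UTF-8-read-as-Latin-1 string; the expected explanation mirrors the doctests elsewhere in this file:

import ftfy

fixed, explanation = ftfy.fix_and_explain("sch\u00c3\u00b6n")  # "schÃ¶n"
print(fixed)        # expected: 'schön'
print(explanation)  # expected: [('encode', 'latin-1'), ('decode', 'utf-8')]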
|
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
fix_encoding_and_explain
|
(
text: str, config: Optional[TextFixerConfig] = None, **kwargs
)
|
Apply the steps of ftfy that detect mojibake and fix it. Returns the fixed
text and a list explaining what was fixed.
This includes fixing text by encoding and decoding it in different encodings,
as well as the subordinate fixes `restore_byte_a0`, `replace_lossy_sequences`,
`decode_inconsistent_utf8`, and `fix_c1_controls`.
Examples::
>>> fix_encoding_and_explain("sÃ³")
ExplainedText(text='só', explanation=[('encode', 'latin-1'), ('decode', 'utf-8')])
>>> result = fix_encoding_and_explain("voilÃ le travail")
>>> result.text
'voilà le travail'
>>> result.explanation
[('encode', 'latin-1'), ('transcode', 'restore_byte_a0'), ('decode', 'utf-8')]
|
Apply the steps of ftfy that detect mojibake and fix it. Returns the fixed
text and a list explaining what was fixed.
| 423 | 464 |
def fix_encoding_and_explain(
text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> ExplainedText:
"""
Apply the steps of ftfy that detect mojibake and fix it. Returns the fixed
text and a list explaining what was fixed.
This includes fixing text by encoding and decoding it in different encodings,
as well as the subordinate fixes `restore_byte_a0`, `replace_lossy_sequences`,
`decode_inconsistent_utf8`, and `fix_c1_controls`.
Examples::
>>> fix_encoding_and_explain("sÃ³")
ExplainedText(text='só', explanation=[('encode', 'latin-1'), ('decode', 'utf-8')])
>>> result = fix_encoding_and_explain("voilÃ le travail")
>>> result.text
'voilà le travail'
>>> result.explanation
[('encode', 'latin-1'), ('transcode', 'restore_byte_a0'), ('decode', 'utf-8')]
"""
if config is None:
config = TextFixerConfig()
if isinstance(text, bytes):
raise UnicodeError(BYTES_ERROR_TEXT)
config = _config_from_kwargs(config, kwargs)
if not config.fix_encoding:
# A weird trivial case: we're asked to fix the encoding, but skip
# fixing the encoding
return ExplainedText(text, [])
plan_so_far: List[ExplanationStep] = []
while True:
prevtext = text
text, plan = _fix_encoding_one_step_and_explain(text, config)
if plan is not None:
plan_so_far.extend(plan)
if text == prevtext:
return ExplainedText(text, plan_so_far)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L423-L464
| 44 |
[
0,
22,
23,
24,
25,
27,
28,
29,
30,
31,
33,
34,
35,
36,
37,
38,
39,
40,
41
] | 45.238095 |
[
26,
32
] | 4.761905 | false | 87.553648 | 42 | 7 | 95.238095 | 17 |
def fix_encoding_and_explain(
text: str, config: Optional[TextFixerConfig] = None, **kwargs
) -> ExplainedText:
if config is None:
config = TextFixerConfig()
if isinstance(text, bytes):
raise UnicodeError(BYTES_ERROR_TEXT)
config = _config_from_kwargs(config, kwargs)
if not config.fix_encoding:
# A weird trivial case: we're asked to fix the encoding, but skip
# fixing the encoding
return ExplainedText(text, [])
plan_so_far: List[ExplanationStep] = []
while True:
prevtext = text
text, plan = _fix_encoding_one_step_and_explain(text, config)
if plan is not None:
plan_so_far.extend(plan)
if text == prevtext:
return ExplainedText(text, plan_so_far)
| 28,106 |
|
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
_fix_encoding_one_step_and_explain
|
(
text: str, config: TextFixerConfig
)
|
return ExplainedText(text, [])
|
Perform one step of fixing the encoding of text.
|
Perform one step of fixing the encoding of text.
| 467 | 577 |
def _fix_encoding_one_step_and_explain(
text: str, config: TextFixerConfig
) -> ExplainedText:
"""
Perform one step of fixing the encoding of text.
"""
if config is None:
config = TextFixerConfig()
if len(text) == 0:
return ExplainedText(text, [])
# The first plan is to return ASCII text unchanged, as well as text
# that doesn't look like it contains mojibake
if chardata.possible_encoding(text, "ascii") or not is_bad(text):
return ExplainedText(text, [])
# As we go through the next step, remember the possible encodings
# that we encounter but don't successfully fix yet. We may need them
# later.
possible_1byte_encodings = []
# Suppose the text was supposed to be UTF-8, but it was decoded using
# a single-byte encoding instead. When these cases can be fixed, they
# are usually the correct thing to do, so try them next.
for encoding in chardata.CHARMAP_ENCODINGS:
if chardata.possible_encoding(text, encoding):
possible_1byte_encodings.append(encoding)
encoded_bytes = text.encode(encoding)
encode_step = ExplanationStep("encode", encoding)
transcode_steps = []
# Now, find out if it's UTF-8 (or close enough). Otherwise,
# remember the encoding for later.
try:
decoding = "utf-8"
# Check encoded_bytes for sequences that would be UTF-8,
# except they have b' ' where b'\xa0' would belong.
if config.restore_byte_a0 and chardata.ALTERED_UTF8_RE.search(
encoded_bytes
):
replaced_bytes = fixes.restore_byte_a0(encoded_bytes)
if replaced_bytes != encoded_bytes:
transcode_steps.append(
ExplanationStep("transcode", "restore_byte_a0")
)
encoded_bytes = replaced_bytes
# Replace sequences where information has been lost
if config.replace_lossy_sequences and encoding.startswith("sloppy"):
replaced_bytes = fixes.replace_lossy_sequences(encoded_bytes)
if replaced_bytes != encoded_bytes:
transcode_steps.append(
ExplanationStep("transcode", "replace_lossy_sequences")
)
encoded_bytes = replaced_bytes
if 0xED in encoded_bytes or 0xC0 in encoded_bytes:
decoding = "utf-8-variants"
decode_step = ExplanationStep("decode", decoding)
steps = [encode_step] + transcode_steps + [decode_step]
fixed = encoded_bytes.decode(decoding)
return ExplainedText(fixed, steps)
except UnicodeDecodeError:
pass
# Look for a-hat-euro sequences that remain, and fix them in isolation.
if config.decode_inconsistent_utf8 and chardata.UTF8_DETECTOR_RE.search(text):
steps = [ExplanationStep("apply", "decode_inconsistent_utf8")]
fixed = fixes.decode_inconsistent_utf8(text)
if fixed != text:
return ExplainedText(fixed, steps)
# The next most likely case is that this is Latin-1 that was intended to
# be read as Windows-1252, because those two encodings in particular are
# easily confused.
if "latin-1" in possible_1byte_encodings:
if "windows-1252" in possible_1byte_encodings:
# This text is in the intersection of Latin-1 and
# Windows-1252, so it's probably legit.
return ExplainedText(text, [])
else:
# Otherwise, it means we have characters that are in Latin-1 but
# not in Windows-1252. Those are C1 control characters. Nobody
# wants those. Assume they were meant to be Windows-1252.
try:
fixed = text.encode("latin-1").decode("windows-1252")
if fixed != text:
steps = [
ExplanationStep("encode", "latin-1"),
ExplanationStep("decode", "windows-1252"),
]
return ExplainedText(fixed, steps)
except UnicodeDecodeError:
pass
# Fix individual characters of Latin-1 with a less satisfying explanation
if config.fix_c1_controls and chardata.C1_CONTROL_RE.search(text):
steps = [ExplanationStep("transcode", "fix_c1_controls")]
fixed = fixes.fix_c1_controls(text)
return ExplainedText(fixed, steps)
# The cases that remain are mixups between two different single-byte
# encodings, and not the common case of Latin-1 vs. Windows-1252.
#
# With the new heuristic in 6.0, it's possible that we're closer to solving
# these in some cases. It would require a lot of testing and tuning, though.
# For now, we leave the text unchanged in these cases.
return ExplainedText(text, [])
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L467-L577
| 44 |
[
0,
5,
6,
8,
9,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71,
72,
73,
74,
75,
76,
77,
78,
79,
80,
81,
86,
87,
88,
89,
90,
91,
92,
93,
94,
95,
96,
97,
98,
99,
100,
101,
102,
103,
104,
105,
106,
107,
108,
109,
110
] | 89.189189 |
[
7,
10,
82
] | 2.702703 | false | 87.553648 | 111 | 25 | 97.297297 | 1 |
def _fix_encoding_one_step_and_explain(
text: str, config: TextFixerConfig
) -> ExplainedText:
if config is None:
config = TextFixerConfig()
if len(text) == 0:
return ExplainedText(text, [])
# The first plan is to return ASCII text unchanged, as well as text
# that doesn't look like it contains mojibake
if chardata.possible_encoding(text, "ascii") or not is_bad(text):
return ExplainedText(text, [])
# As we go through the next step, remember the possible encodings
# that we encounter but don't successfully fix yet. We may need them
# later.
possible_1byte_encodings = []
# Suppose the text was supposed to be UTF-8, but it was decoded using
# a single-byte encoding instead. When these cases can be fixed, they
# are usually the correct thing to do, so try them next.
for encoding in chardata.CHARMAP_ENCODINGS:
if chardata.possible_encoding(text, encoding):
possible_1byte_encodings.append(encoding)
encoded_bytes = text.encode(encoding)
encode_step = ExplanationStep("encode", encoding)
transcode_steps = []
# Now, find out if it's UTF-8 (or close enough). Otherwise,
# remember the encoding for later.
try:
decoding = "utf-8"
# Check encoded_bytes for sequences that would be UTF-8,
# except they have b' ' where b'\xa0' would belong.
if config.restore_byte_a0 and chardata.ALTERED_UTF8_RE.search(
encoded_bytes
):
replaced_bytes = fixes.restore_byte_a0(encoded_bytes)
if replaced_bytes != encoded_bytes:
transcode_steps.append(
ExplanationStep("transcode", "restore_byte_a0")
)
encoded_bytes = replaced_bytes
# Replace sequences where information has been lost
if config.replace_lossy_sequences and encoding.startswith("sloppy"):
replaced_bytes = fixes.replace_lossy_sequences(encoded_bytes)
if replaced_bytes != encoded_bytes:
transcode_steps.append(
ExplanationStep("transcode", "replace_lossy_sequences")
)
encoded_bytes = replaced_bytes
if 0xED in encoded_bytes or 0xC0 in encoded_bytes:
decoding = "utf-8-variants"
decode_step = ExplanationStep("decode", decoding)
steps = [encode_step] + transcode_steps + [decode_step]
fixed = encoded_bytes.decode(decoding)
return ExplainedText(fixed, steps)
except UnicodeDecodeError:
pass
# Look for a-hat-euro sequences that remain, and fix them in isolation.
if config.decode_inconsistent_utf8 and chardata.UTF8_DETECTOR_RE.search(text):
steps = [ExplanationStep("apply", "decode_inconsistent_utf8")]
fixed = fixes.decode_inconsistent_utf8(text)
if fixed != text:
return ExplainedText(fixed, steps)
# The next most likely case is that this is Latin-1 that was intended to
# be read as Windows-1252, because those two encodings in particular are
# easily confused.
if "latin-1" in possible_1byte_encodings:
if "windows-1252" in possible_1byte_encodings:
# This text is in the intersection of Latin-1 and
# Windows-1252, so it's probably legit.
return ExplainedText(text, [])
else:
# Otherwise, it means we have characters that are in Latin-1 but
# not in Windows-1252. Those are C1 control characters. Nobody
# wants those. Assume they were meant to be Windows-1252.
try:
fixed = text.encode("latin-1").decode("windows-1252")
if fixed != text:
steps = [
ExplanationStep("encode", "latin-1"),
ExplanationStep("decode", "windows-1252"),
]
return ExplainedText(fixed, steps)
except UnicodeDecodeError:
pass
# Fix individual characters of Latin-1 with a less satisfying explanation
if config.fix_c1_controls and chardata.C1_CONTROL_RE.search(text):
steps = [ExplanationStep("transcode", "fix_c1_controls")]
fixed = fixes.fix_c1_controls(text)
return ExplainedText(fixed, steps)
# The cases that remain are mixups between two different single-byte
# encodings, and not the common case of Latin-1 vs. Windows-1252.
#
# With the new heuristic in 6.0, it's possible that we're closer to solving
# these in some cases. It would require a lot of testing and tuning, though.
# For now, we leave the text unchanged in these cases.
return ExplainedText(text, [])
| 28,107 |
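A sketch of one pass through the private one-step fixer above; `fix_encoding_and_explain` calls it in a loop until the text stops changing. Private API, same caveats as the other internal helpers:

from ftfy import TextFixerConfig, _fix_encoding_one_step_and_explain

text, steps = _fix_encoding_one_step_and_explain("s\u00c3\u00b3", TextFixerConfig())
print(text)   # expected: 'só'
print(steps)  # expected: [('encode', 'latin-1'), ('decode', 'utf-8')]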
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
fix_encoding
|
(text: str, config: Optional[TextFixerConfig] = None, **kwargs)
|
return fixed
|
Apply just the encoding-fixing steps of ftfy to this text. Returns the
fixed text, discarding the explanation.
>>> fix_encoding("Ã³")
'ó'
>>> fix_encoding("&ATILDE;&SUP3;")
'&ATILDE;&SUP3;'
|
Apply just the encoding-fixing steps of ftfy to this text. Returns the
fixed text, discarding the explanation.
| 580 | 594 |
def fix_encoding(text: str, config: Optional[TextFixerConfig] = None, **kwargs):
"""
Apply just the encoding-fixing steps of ftfy to this text. Returns the
fixed text, discarding the explanation.
>>> fix_encoding("Ã³")
'ó'
>>> fix_encoding("&ATILDE;&SUP3;")
'&ATILDE;&SUP3;'
"""
if config is None:
config = TextFixerConfig(explain=False)
config = _config_from_kwargs(config, kwargs)
fixed, _explan = fix_encoding_and_explain(text, config)
return fixed
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L580-L594
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14
] | 100 |
[] | 0 | true | 87.553648 | 15 | 2 | 100 | 7 |
def fix_encoding(text: str, config: Optional[TextFixerConfig] = None, **kwargs):
if config is None:
config = TextFixerConfig(explain=False)
config = _config_from_kwargs(config, kwargs)
fixed, _explan = fix_encoding_and_explain(text, config)
return fixed
| 28,108 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
fix_text_segment
|
(text: str, config: Optional[TextFixerConfig] = None, **kwargs)
|
return fixed
|
Fix text as a single segment, with a consistent sequence of steps that
are applied to fix the text. Discard the explanation.
|
Fix text as a single segment, with a consistent sequence of steps that
are applied to fix the text. Discard the explanation.
| 601 | 610 |
def fix_text_segment(text: str, config: Optional[TextFixerConfig] = None, **kwargs):
"""
Fix text as a single segment, with a consistent sequence of steps that
are applied to fix the text. Discard the explanation.
"""
if config is None:
config = TextFixerConfig(explain=False)
config = _config_from_kwargs(config, kwargs)
fixed, _explan = fix_and_explain(text, config)
return fixed
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L601-L610
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 100 |
[] | 0 | true | 87.553648 | 10 | 2 | 100 | 2 |
def fix_text_segment(text: str, config: Optional[TextFixerConfig] = None, **kwargs):
if config is None:
config = TextFixerConfig(explain=False)
config = _config_from_kwargs(config, kwargs)
fixed, _explan = fix_and_explain(text, config)
return fixed
| 28,109 |
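A sketch of `fix_text_segment`, which fixes one segment without the line-splitting done by `fix_text`; the input is the well-known double-mojibake example and the output comment is an expectation:

import ftfy

# "Ã¢â‚¬â„¢" is "’" that has been mangled through two encoding round trips.
broken = "The Mona Lisa doesn\u00c3\u00a2\u00e2\u201a\u00ac\u00e2\u201e\u00a2t have eyebrows."
print(ftfy.fix_text_segment(broken))
# expected: "The Mona Lisa doesn't have eyebrows."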
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
fix_file
|
(
input_file: TextIO,
encoding: Optional[str] = None,
config: Optional[TextFixerConfig] = None,
**kwargs
)
|
Fix text that is found in a file.
If the file is being read as Unicode text, use that. If it's being read as
bytes, then we hope an encoding was supplied. If not, unfortunately, we
have to guess what encoding it is. We'll try a few common encodings, but we
make no promises. See the `guess_bytes` function for how this is done.
The output is a stream of fixed lines of text.
|
Fix text that is found in a file.
| 613 | 643 |
def fix_file(
input_file: TextIO,
encoding: Optional[str] = None,
config: Optional[TextFixerConfig] = None,
**kwargs
) -> Iterator[str]:
"""
Fix text that is found in a file.
If the file is being read as Unicode text, use that. If it's being read as
bytes, then we hope an encoding was supplied. If not, unfortunately, we
have to guess what encoding it is. We'll try a few common encodings, but we
make no promises. See the `guess_bytes` function for how this is done.
The output is a stream of fixed lines of text.
"""
if config is None:
config = TextFixerConfig()
config = _config_from_kwargs(config, kwargs)
for line in input_file:
if isinstance(line, bytes):
if encoding is None:
line, encoding = guess_bytes(line)
else:
line = line.decode(encoding)
if config.unescape_html == "auto" and "<" in line:
config = config._replace(unescape_html=False)
fixed_line, _explan = fix_and_explain(line, config)
yield fixed_line
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L613-L643
| 44 |
[
0
] | 3.225806 |
[
16,
17,
18,
20,
21,
22,
23,
25,
26,
27,
29,
30
] | 38.709677 | false | 87.553648 | 31 | 7 | 61.290323 | 8 |
def fix_file(
input_file: TextIO,
encoding: Optional[str] = None,
config: Optional[TextFixerConfig] = None,
**kwargs
) -> Iterator[str]:
if config is None:
config = TextFixerConfig()
config = _config_from_kwargs(config, kwargs)
for line in input_file:
if isinstance(line, bytes):
if encoding is None:
line, encoding = guess_bytes(line)
else:
line = line.decode(encoding)
if config.unescape_html == "auto" and "<" in line:
config = config._replace(unescape_html=False)
fixed_line, _explan = fix_and_explain(line, config)
yield fixed_line
| 28,110 |
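A sketch of `fix_file` on an in-memory byte stream standing in for a real file; the mojibake line matches the `restore_byte_a0` doctest earlier in this file:

import io

import ftfy

stream = io.BytesIO("voil\u00c3 le travail\n".encode("utf-8"))
for line in ftfy.fix_file(stream, encoding="utf-8"):
    print(line, end="")  # expected: 'voilà le travail'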
|
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
guess_bytes
|
(bstring: bytes)
|
return bstring.decode("sloppy-windows-1252"), "sloppy-windows-1252"
|
NOTE: Using `guess_bytes` is not the recommended way of using ftfy. ftfy
is not designed to be an encoding detector.
In the unfortunate situation that you have some bytes in an unknown
encoding, ftfy can guess a reasonable strategy for decoding them, by trying
a few common encodings that can be distinguished from each other.
Unlike the rest of ftfy, this may not be accurate, and it may *create*
Unicode problems instead of solving them!
The encodings we try here are:
- UTF-16 with a byte order mark, because a UTF-16 byte order mark looks
like nothing else
- UTF-8, because it's the global standard, which has been used by a
majority of the Web since 2008
- "utf-8-variants", or buggy implementations of UTF-8
- MacRoman, because Microsoft Office thinks it's still a thing, and it
can be distinguished by its line breaks. (If there are no line breaks in
the string, though, you're out of luck.)
- "sloppy-windows-1252", the Latin-1-like encoding that is the most common
single-byte encoding.
|
NOTE: Using `guess_bytes` is not the recommended way of using ftfy. ftfy
is not designed to be an encoding detector.
| 646 | 714 |
def guess_bytes(bstring: bytes) -> Tuple[str, str]:
"""
NOTE: Using `guess_bytes` is not the recommended way of using ftfy. ftfy
is not designed to be an encoding detector.
In the unfortunate situation that you have some bytes in an unknown
encoding, ftfy can guess a reasonable strategy for decoding them, by trying
a few common encodings that can be distinguished from each other.
Unlike the rest of ftfy, this may not be accurate, and it may *create*
Unicode problems instead of solving them!
The encodings we try here are:
- UTF-16 with a byte order mark, because a UTF-16 byte order mark looks
like nothing else
- UTF-8, because it's the global standard, which has been used by a
majority of the Web since 2008
- "utf-8-variants", or buggy implementations of UTF-8
- MacRoman, because Microsoft Office thinks it's still a thing, and it
can be distinguished by its line breaks. (If there are no line breaks in
the string, though, you're out of luck.)
- "sloppy-windows-1252", the Latin-1-like encoding that is the most common
single-byte encoding.
"""
if isinstance(bstring, str):
raise UnicodeError(
"This string was already decoded as Unicode. You should pass "
"bytes to guess_bytes, not Unicode."
)
if bstring.startswith(b"\xfe\xff") or bstring.startswith(b"\xff\xfe"):
return bstring.decode("utf-16"), "utf-16"
byteset = set(bstring)
try:
if 0xED in byteset or 0xC0 in byteset:
# Byte 0xed can be used to encode a range of codepoints that
# are UTF-16 surrogates. UTF-8 does not use UTF-16 surrogates,
# so when we see 0xed, it's very likely we're being asked to
# decode CESU-8, the variant that encodes UTF-16 surrogates
# instead of the original characters themselves.
#
# This will occasionally trigger on standard UTF-8, as there
# are some Korean characters that also use byte 0xed, but that's
# not harmful because standard UTF-8 characters will decode the
# same way in our 'utf-8-variants' codec.
#
# Byte 0xc0 is impossible because, numerically, it would only
# encode characters lower than U+0040. Those already have
# single-byte representations, and UTF-8 requires using the
# shortest possible representation. However, Java hides the null
# codepoint, U+0000, in a non-standard longer representation -- it
# encodes it as 0xc0 0x80 instead of 0x00, guaranteeing that 0x00
# will never appear in the encoded bytes.
#
# The 'utf-8-variants' decoder can handle both of these cases, as
# well as standard UTF-8, at the cost of a bit of speed.
return bstring.decode("utf-8-variants"), "utf-8-variants"
else:
return bstring.decode("utf-8"), "utf-8"
except UnicodeDecodeError:
pass
if 0x0D in byteset and 0x0A not in byteset:
# Files that contain CR and not LF are likely to be MacRoman.
return bstring.decode("macroman"), "macroman"
return bstring.decode("sloppy-windows-1252"), "sloppy-windows-1252"
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L646-L714
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
51,
52,
53,
54,
55,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68
] | 94.202899 |
[
26
] | 1.449275 | false | 87.553648 | 69 | 9 | 98.550725 | 22 |
def guess_bytes(bstring: bytes) -> Tuple[str, str]:
if isinstance(bstring, str):
raise UnicodeError(
"This string was already decoded as Unicode. You should pass "
"bytes to guess_bytes, not Unicode."
)
if bstring.startswith(b"\xfe\xff") or bstring.startswith(b"\xff\xfe"):
return bstring.decode("utf-16"), "utf-16"
byteset = set(bstring)
try:
if 0xED in byteset or 0xC0 in byteset:
# Byte 0xed can be used to encode a range of codepoints that
# are UTF-16 surrogates. UTF-8 does not use UTF-16 surrogates,
# so when we see 0xed, it's very likely we're being asked to
# decode CESU-8, the variant that encodes UTF-16 surrogates
# instead of the original characters themselves.
#
# This will occasionally trigger on standard UTF-8, as there
# are some Korean characters that also use byte 0xed, but that's
# not harmful because standard UTF-8 characters will decode the
# same way in our 'utf-8-variants' codec.
#
# Byte 0xc0 is impossible because, numerically, it would only
# encode characters lower than U+0040. Those already have
# single-byte representations, and UTF-8 requires using the
# shortest possible representation. However, Java hides the null
# codepoint, U+0000, in a non-standard longer representation -- it
# encodes it as 0xc0 0x80 instead of 0x00, guaranteeing that 0x00
# will never appear in the encoded bytes.
#
# The 'utf-8-variants' decoder can handle both of these cases, as
# well as standard UTF-8, at the cost of a bit of speed.
return bstring.decode("utf-8-variants"), "utf-8-variants"
else:
return bstring.decode("utf-8"), "utf-8"
except UnicodeDecodeError:
pass
if 0x0D in byteset and 0x0A not in byteset:
# Files that contain CR and not LF are likely to be MacRoman.
return bstring.decode("macroman"), "macroman"
return bstring.decode("sloppy-windows-1252"), "sloppy-windows-1252"
| 28,111 |
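A sketch of `guess_bytes` exercising three of the branches above; the byte strings were chosen for this note and the outputs are expected, not verified:

import ftfy  # importing ftfy is assumed to register the "sloppy-*" codecs

print(ftfy.guess_bytes(b"caf\xc3\xa9"))  # expected: ('café', 'utf-8')
print(ftfy.guess_bytes(b"caf\xe9"))      # expected: ('café', 'sloppy-windows-1252')
print(ftfy.guess_bytes(b"caf\x8e\r"))    # CR, no LF; expected: ('café\r', 'macroman')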
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
apply_plan
|
(text: str, plan: List[Tuple[str, str]])
|
return obj
|
Apply a plan for fixing the encoding of text.
The plan is a list of tuples of the form (operation, arg).
`operation` is one of:
- `'encode'`: convert a string to bytes, using `arg` as the encoding
- `'decode'`: convert bytes to a string, using `arg` as the encoding
- `'transcode'`: convert bytes to bytes, using the function named `arg`
- `'apply'`: convert a string to a string, using the function named `arg`
The functions that can be applied by 'transcode' and 'apply' are
specifically those that appear in the dictionary named `FIXERS`. They
can also be imported from the `ftfy.fixes` module.
Example::
>>> mojibake = "schÃ¶n"
>>> text, plan = fix_and_explain(mojibake)
>>> apply_plan(mojibake, plan)
'schön'
|
Apply a plan for fixing the encoding of text.
| 718 | 756 |
def apply_plan(text: str, plan: List[Tuple[str, str]]):
"""
Apply a plan for fixing the encoding of text.
The plan is a list of tuples of the form (operation, arg).
`operation` is one of:
- `'encode'`: convert a string to bytes, using `arg` as the encoding
- `'decode'`: convert bytes to a string, using `arg` as the encoding
- `'transcode'`: convert bytes to bytes, using the function named `arg`
- `'apply'`: convert a string to a string, using the function named `arg`
The functions that can be applied by 'transcode' and 'apply' are
specifically those that appear in the dictionary named `FIXERS`. They
    can also be imported from the `ftfy.fixes` module.
Example::
>>> mojibake = "schön"
>>> text, plan = fix_and_explain(mojibake)
>>> apply_plan(mojibake, plan)
'schön'
"""
obj = text
for operation, encoding in plan:
if operation == "encode":
obj = obj.encode(encoding)
elif operation == "decode":
obj = obj.decode(encoding)
elif operation in ("transcode", "apply"):
if encoding in FIXERS:
obj = FIXERS[encoding](obj)
else:
raise ValueError("Unknown function to apply: %s" % encoding)
else:
raise ValueError("Unknown plan step: %s" % operation)
return obj
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L718-L756
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
37,
38
] | 92.307692 |
[
34,
36
] | 5.128205 | false | 87.553648 | 39 | 6 | 94.871795 | 21 |
def apply_plan(text: str, plan: List[Tuple[str, str]]):
obj = text
for operation, encoding in plan:
if operation == "encode":
obj = obj.encode(encoding)
elif operation == "decode":
obj = obj.decode(encoding)
elif operation in ("transcode", "apply"):
if encoding in FIXERS:
obj = FIXERS[encoding](obj)
else:
raise ValueError("Unknown function to apply: %s" % encoding)
else:
raise ValueError("Unknown plan step: %s" % operation)
return obj
| 28,112 |
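A hand-written plan run through apply_plan (a sketch; the step names follow the (operation, arg) tuples documented above, and the sloppy codec is registered as a side effect of importing ftfy):

from ftfy import apply_plan

plan = [
    ("encode", "sloppy-windows-1252"),  # str -> bytes
    ("decode", "utf-8"),                # bytes -> str
]
assert apply_plan("schön", plan) == "schön"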
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
explain_unicode
|
(text: str)
|
A utility method that's useful for debugging mysterious Unicode.
It breaks down a string, showing you for each codepoint its number in
hexadecimal, its glyph, its category in the Unicode standard, and its name
in the Unicode standard.
>>> explain_unicode('(╯°□°)╯︵ ┻━┻')
U+0028 ( [Ps] LEFT PARENTHESIS
U+256F ╯ [So] BOX DRAWINGS LIGHT ARC UP AND LEFT
U+00B0 ° [So] DEGREE SIGN
U+25A1 □ [So] WHITE SQUARE
U+00B0 ° [So] DEGREE SIGN
U+0029 ) [Pe] RIGHT PARENTHESIS
U+256F ╯ [So] BOX DRAWINGS LIGHT ARC UP AND LEFT
U+FE35 ︵ [Ps] PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS
U+0020 [Zs] SPACE
U+253B ┻ [So] BOX DRAWINGS HEAVY UP AND HORIZONTAL
U+2501 ━ [So] BOX DRAWINGS HEAVY HORIZONTAL
U+253B ┻ [So] BOX DRAWINGS HEAVY UP AND HORIZONTAL
|
A utility method that's useful for debugging mysterious Unicode.
| 759 | 793 |
def explain_unicode(text: str):
"""
A utility method that's useful for debugging mysterious Unicode.
It breaks down a string, showing you for each codepoint its number in
hexadecimal, its glyph, its category in the Unicode standard, and its name
in the Unicode standard.
>>> explain_unicode('(╯°□°)╯︵ ┻━┻')
U+0028 ( [Ps] LEFT PARENTHESIS
U+256F ╯ [So] BOX DRAWINGS LIGHT ARC UP AND LEFT
U+00B0 ° [So] DEGREE SIGN
U+25A1 □ [So] WHITE SQUARE
U+00B0 ° [So] DEGREE SIGN
U+0029 ) [Pe] RIGHT PARENTHESIS
U+256F ╯ [So] BOX DRAWINGS LIGHT ARC UP AND LEFT
U+FE35 ︵ [Ps] PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS
U+0020 [Zs] SPACE
U+253B ┻ [So] BOX DRAWINGS HEAVY UP AND HORIZONTAL
U+2501 ━ [So] BOX DRAWINGS HEAVY HORIZONTAL
U+253B ┻ [So] BOX DRAWINGS HEAVY UP AND HORIZONTAL
"""
for char in text:
if char.isprintable():
display = char
else:
display = char.encode("unicode-escape").decode("ascii")
print(
"U+{code:04X} {display} [{category}] {name}".format(
display=display_ljust(display, 7),
code=ord(char),
category=unicodedata.category(char),
name=unicodedata.name(char, "<unknown>"),
)
)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L759-L793
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21
] | 62.857143 |
[
22,
23,
24,
26,
27
] | 14.285714 | false | 87.553648 | 35 | 3 | 85.714286 | 19 |
def explain_unicode(text: str):
for char in text:
if char.isprintable():
display = char
else:
display = char.encode("unicode-escape").decode("ascii")
print(
"U+{code:04X} {display} [{category}] {name}".format(
display=display_ljust(display, 7),
code=ord(char),
category=unicodedata.category(char),
name=unicodedata.name(char, "<unknown>"),
)
)
| 28,113 |
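Usage sketch: the function prints one line per codepoint and returns None (output shown as comments; column widths approximate):

from ftfy import explain_unicode

explain_unicode("Ω!")
# U+03A9 Ω       [Lu] GREEK CAPITAL LETTER OMEGA
# U+0021 !       [Po] EXCLAMATION MARK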
|
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/__init__.py
|
ExplanationStep.__repr__
|
(self)
|
return repr(tuple(self))
|
Get the string representation of an ExplanationStep. We output the
representation of the equivalent tuple, for simplicity.
|
Get the string representation of an ExplanationStep. We output the
representation of the equivalent tuple, for simplicity.
| 55 | 60 |
def __repr__(self) -> str:
"""
Get the string representation of an ExplanationStep. We output the
representation of the equivalent tuple, for simplicity.
"""
return repr(tuple(self))
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/__init__.py#L55-L60
| 44 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 87.553648 | 6 | 1 | 100 | 2 |
def __repr__(self) -> str:
return repr(tuple(self))
| 28,114 |
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/sloppy.py
|
make_sloppy_codec
|
(encoding)
|
return codecs.CodecInfo(
name="sloppy-" + encoding,
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
Take a codec name, and return a 'sloppy' version of that codec that can
encode and decode the unassigned bytes in that encoding.
Single-byte encodings in the standard library are defined using some
boilerplate classes surrounding the functions that do the actual work,
`codecs.charmap_decode` and `charmap_encode`. This function, given an
encoding name, *defines* those boilerplate classes.
|
Take a codec name, and return a 'sloppy' version of that codec that can
encode and decode the unassigned bytes in that encoding.
| 82 | 153 |
def make_sloppy_codec(encoding):
"""
Take a codec name, and return a 'sloppy' version of that codec that can
encode and decode the unassigned bytes in that encoding.
Single-byte encodings in the standard library are defined using some
boilerplate classes surrounding the functions that do the actual work,
`codecs.charmap_decode` and `charmap_encode`. This function, given an
encoding name, *defines* those boilerplate classes.
"""
# Make a bytestring of all 256 possible bytes.
all_bytes = bytes(range(256))
# Get a list of what they would decode to in Latin-1.
sloppy_chars = list(all_bytes.decode("latin-1"))
# Get a list of what they decode to in the given encoding. Use the
# replacement character for unassigned bytes.
if PY26:
decoded_chars = all_bytes.decode(encoding, "replace")
else:
decoded_chars = all_bytes.decode(encoding, errors="replace")
# Update the sloppy_chars list. Each byte that was successfully decoded
# gets its decoded value in the list. The unassigned bytes are left as
# they are, which gives their decoding in Latin-1.
for i, char in enumerate(decoded_chars):
if char != REPLACEMENT_CHAR:
sloppy_chars[i] = char
# For ftfy's own purposes, we're going to allow byte 1A, the "Substitute"
# control code, to encode the Unicode replacement character U+FFFD.
sloppy_chars[0x1A] = REPLACEMENT_CHAR
# Create the data structures that tell the charmap methods how to encode
# and decode in this sloppy encoding.
decoding_table = "".join(sloppy_chars)
encoding_table = codecs.charmap_build(decoding_table)
# Now produce all the class boilerplate. Look at the Python source for
# `encodings.cp1252` for comparison; this is almost exactly the same,
# except I made it follow pep8.
class Codec(codecs.Codec):
def encode(self, input, errors="strict"):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors="strict"):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(
name="sloppy-" + encoding,
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/sloppy.py#L82-L153
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
42,
43,
44,
45,
46,
47,
48,
49,
50,
52,
53,
54,
56,
57,
58,
59,
60,
61,
62,
63,
64,
65,
66,
67,
68,
69,
70,
71
] | 95.833333 |
[
19,
51,
55
] | 4.166667 | false | 92.105263 | 72 | 8 | 95.833333 | 7 |
def make_sloppy_codec(encoding):
# Make a bytestring of all 256 possible bytes.
all_bytes = bytes(range(256))
# Get a list of what they would decode to in Latin-1.
sloppy_chars = list(all_bytes.decode("latin-1"))
# Get a list of what they decode to in the given encoding. Use the
# replacement character for unassigned bytes.
if PY26:
decoded_chars = all_bytes.decode(encoding, "replace")
else:
decoded_chars = all_bytes.decode(encoding, errors="replace")
# Update the sloppy_chars list. Each byte that was successfully decoded
# gets its decoded value in the list. The unassigned bytes are left as
# they are, which gives their decoding in Latin-1.
for i, char in enumerate(decoded_chars):
if char != REPLACEMENT_CHAR:
sloppy_chars[i] = char
# For ftfy's own purposes, we're going to allow byte 1A, the "Substitute"
# control code, to encode the Unicode replacement character U+FFFD.
sloppy_chars[0x1A] = REPLACEMENT_CHAR
# Create the data structures that tell the charmap methods how to encode
# and decode in this sloppy encoding.
decoding_table = "".join(sloppy_chars)
encoding_table = codecs.charmap_build(decoding_table)
# Now produce all the class boilerplate. Look at the Python source for
# `encodings.cp1252` for comparison; this is almost exactly the same,
# except I made it follow pep8.
class Codec(codecs.Codec):
def encode(self, input, errors="strict"):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors="strict"):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(
name="sloppy-" + encoding,
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| 28,116 |
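A sketch of what the generated codec buys you: byte 0x81 is unassigned in strict windows-1252 (decoding it raises UnicodeDecodeError), but the sloppy variant falls back to Latin-1, so every byte string round-trips:

import ftfy.bad_codecs  # importing registers the sloppy-* codecs

raw = bytes(range(256))
text = raw.decode("sloppy-windows-1252")
assert text[0x81] == "\x81"                       # Latin-1 fallback
assert text[0x1A] == "\ufffd"                     # the special-cased byte
assert text.encode("sloppy-windows-1252") == raw  # lossless round trip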
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/utf8_variants.py
|
IncrementalDecoder._buffer_decode
|
(self, input, errors, final)
|
return "".join(decoded_segments), position
|
Decode bytes that may be arriving in a stream, following the Codecs
API.
`input` is the incoming sequence of bytes. `errors` tells us how to
handle errors, though we delegate all error-handling cases to the real
UTF-8 decoder to ensure correct behavior. `final` indicates whether
this is the end of the sequence, in which case we should raise an
error given incomplete input.
Returns as much decoded text as possible, and the number of bytes
consumed.
|
Decode bytes that may be arriving in a stream, following the Codecs
API.
| 93 | 132 |
def _buffer_decode(self, input, errors, final):
"""
Decode bytes that may be arriving in a stream, following the Codecs
API.
`input` is the incoming sequence of bytes. `errors` tells us how to
handle errors, though we delegate all error-handling cases to the real
UTF-8 decoder to ensure correct behavior. `final` indicates whether
this is the end of the sequence, in which case we should raise an
error given incomplete input.
Returns as much decoded text as possible, and the number of bytes
consumed.
"""
# decoded_segments are the pieces of text we have decoded so far,
# and position is our current position in the byte string. (Bytes
# before this position have been consumed, and bytes after it have
# yet to be decoded.)
decoded_segments = []
position = 0
while True:
# Use _buffer_decode_step to decode a segment of text.
decoded, consumed = self._buffer_decode_step(
input[position:], errors, final
)
if consumed == 0:
# Either there's nothing left to decode, or we need to wait
# for more input. Either way, we're done for now.
break
# Append the decoded text to the list, and update our position.
decoded_segments.append(decoded)
position += consumed
if final:
# _buffer_decode_step must consume all the bytes when `final` is
# true.
assert position == len(input)
return "".join(decoded_segments), position
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/utf8_variants.py#L93-L132
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39
] | 100 |
[] | 0 | true | 89.655172 | 40 | 5 | 100 | 11 |
def _buffer_decode(self, input, errors, final):
# decoded_segments are the pieces of text we have decoded so far,
# and position is our current position in the byte string. (Bytes
# before this position have been consumed, and bytes after it have
# yet to be decoded.)
decoded_segments = []
position = 0
while True:
# Use _buffer_decode_step to decode a segment of text.
decoded, consumed = self._buffer_decode_step(
input[position:], errors, final
)
if consumed == 0:
# Either there's nothing left to decode, or we need to wait
# for more input. Either way, we're done for now.
break
# Append the decoded text to the list, and update our position.
decoded_segments.append(decoded)
position += consumed
if final:
# _buffer_decode_step must consume all the bytes when `final` is
# true.
assert position == len(input)
return "".join(decoded_segments), position
| 28,117 |
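Sketch of the incremental API this method backs: bytes may arrive in chunks, and nothing is emitted until a whole special sequence is available (final=True flushes and validates the tail):

import codecs
import ftfy.bad_codecs  # registers 'utf-8-variants'

dec = codecs.getincrementaldecoder("utf-8-variants")()
assert dec.decode(b"\xed\xa0\xbd", final=False) == ""   # buffered, waiting
assert dec.decode(b"\xed\xb8\x80", final=True) == "\U0001F600"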
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/utf8_variants.py
|
IncrementalDecoder._buffer_decode_step
|
(self, input, errors, final)
|
There are three possibilities for each decoding step:
- Decode as much real UTF-8 as possible.
- Decode a six-byte CESU-8 sequence at the current position.
- Decode a Java-style null at the current position.
This method figures out which step is appropriate, and does it.
|
There are three possibilities for each decoding step:
| 134 | 173 |
def _buffer_decode_step(self, input, errors, final):
"""
There are three possibilities for each decoding step:
- Decode as much real UTF-8 as possible.
- Decode a six-byte CESU-8 sequence at the current position.
- Decode a Java-style null at the current position.
This method figures out which step is appropriate, and does it.
"""
# Get a reference to the superclass method that we'll be using for
# most of the real work.
sup = UTF8IncrementalDecoder._buffer_decode
# Find the next byte position that indicates a variant of UTF-8.
match = SPECIAL_BYTES_RE.search(input)
if match is None:
return sup(input, errors, final)
cutoff = match.start()
if cutoff > 0:
return sup(input[:cutoff], errors, True)
# Some byte sequence that we intend to handle specially matches
# at the beginning of the input.
if input.startswith(b"\xc0"):
if len(input) > 1:
# Decode the two-byte sequence 0xc0 0x80.
return "\u0000", 2
else:
if final:
# We hit the end of the stream. Let the superclass method
# handle it.
return sup(input, errors, True)
else:
# Wait to see another byte.
return "", 0
else:
# Decode a possible six-byte sequence starting with 0xed.
return self._buffer_decode_surrogates(sup, input, errors, final)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/utf8_variants.py#L134-L173
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
38,
39
] | 80 |
[
30,
33,
36
] | 7.5 | false | 89.655172 | 40 | 6 | 92.5 | 7 |
def _buffer_decode_step(self, input, errors, final):
# Get a reference to the superclass method that we'll be using for
# most of the real work.
sup = UTF8IncrementalDecoder._buffer_decode
# Find the next byte position that indicates a variant of UTF-8.
match = SPECIAL_BYTES_RE.search(input)
if match is None:
return sup(input, errors, final)
cutoff = match.start()
if cutoff > 0:
return sup(input[:cutoff], errors, True)
# Some byte sequence that we intend to handle specially matches
# at the beginning of the input.
if input.startswith(b"\xc0"):
if len(input) > 1:
# Decode the two-byte sequence 0xc0 0x80.
return "\u0000", 2
else:
if final:
# We hit the end of the stream. Let the superclass method
# handle it.
return sup(input, errors, True)
else:
# Wait to see another byte.
return "", 0
else:
# Decode a possible six-byte sequence starting with 0xed.
return self._buffer_decode_surrogates(sup, input, errors, final)
| 28,118 |
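The 0xc0 branch above handles Java's modified UTF-8, where U+0000 is written as the two-byte overlong sequence c0 80 (a sketch, relying on the codec registration shown earlier):

import ftfy.bad_codecs

assert b"a\xc0\x80b".decode("utf-8-variants") == "a\x00b"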
|
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/utf8_variants.py
|
IncrementalDecoder._buffer_decode_surrogates
|
(sup, input, errors, final)
|
When we have improperly encoded surrogates, we can still see the
bits that they were meant to represent.
The surrogates were meant to encode a 20-bit number, to which we
add 0x10000 to get a codepoint. That 20-bit number now appears in
this form:
11101101 1010abcd 10efghij 11101101 1011klmn 10opqrst
The CESU8_RE above matches byte sequences of this form. Then we need
to extract the bits and assemble a codepoint number from them.
|
When we have improperly encoded surrogates, we can still see the
bits that they were meant to represent.
| 176 | 218 |
def _buffer_decode_surrogates(sup, input, errors, final):
"""
When we have improperly encoded surrogates, we can still see the
bits that they were meant to represent.
The surrogates were meant to encode a 20-bit number, to which we
add 0x10000 to get a codepoint. That 20-bit number now appears in
this form:
11101101 1010abcd 10efghij 11101101 1011klmn 10opqrst
The CESU8_RE above matches byte sequences of this form. Then we need
to extract the bits and assemble a codepoint number from them.
"""
if len(input) < 6:
if final:
# We found 0xed near the end of the stream, and there aren't
# six bytes to decode. Delegate to the superclass method to
# handle it as normal UTF-8. It might be a Hangul character
# or an error.
return sup(input, errors, final)
else:
# We found a surrogate, the stream isn't over yet, and we don't
# know enough of the following bytes to decode anything, so
# consume zero bytes and wait.
return "", 0
else:
if CESU8_RE.match(input):
# Given this is a CESU-8 sequence, do some math to pull out
# the intended 20-bit value, and consume six bytes.
codepoint = (
((input[1] & 0x0F) << 16)
+ ((input[2] & 0x3F) << 10)
+ ((input[4] & 0x0F) << 6)
+ (input[5] & 0x3F)
+ 0x10000
)
return chr(codepoint), 6
else:
# This looked like a CESU-8 sequence, but it wasn't one.
# 0xed indicates the start of a three-byte sequence, so give
# three bytes to the superclass to decode as usual.
return sup(input[:3], errors, False)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/utf8_variants.py#L176-L218
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41
] | 88.372093 |
[
20,
42
] | 4.651163 | false | 89.655172 | 43 | 4 | 95.348837 | 11 |
def _buffer_decode_surrogates(sup, input, errors, final):
if len(input) < 6:
if final:
# We found 0xed near the end of the stream, and there aren't
# six bytes to decode. Delegate to the superclass method to
# handle it as normal UTF-8. It might be a Hangul character
# or an error.
return sup(input, errors, final)
else:
# We found a surrogate, the stream isn't over yet, and we don't
# know enough of the following bytes to decode anything, so
# consume zero bytes and wait.
return "", 0
else:
if CESU8_RE.match(input):
# Given this is a CESU-8 sequence, do some math to pull out
# the intended 20-bit value, and consume six bytes.
codepoint = (
((input[1] & 0x0F) << 16)
+ ((input[2] & 0x3F) << 10)
+ ((input[4] & 0x0F) << 6)
+ (input[5] & 0x3F)
+ 0x10000
)
return chr(codepoint), 6
else:
# This looked like a CESU-8 sequence, but it wasn't one.
# 0xed indicates the start of a three-byte sequence, so give
# three bytes to the superclass to decode as usual.
return sup(input[:3], errors, False)
| 28,119 |
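A worked instance of the bit math above for U+1F600, whose UTF-16 surrogate pair D83D/DE00 becomes the CESU-8 bytes ed a0 bd ed b8 80:

data = bytes([0xED, 0xA0, 0xBD, 0xED, 0xB8, 0x80])
codepoint = (
    ((data[1] & 0x0F) << 16)    # 0xA0 & 0x0F = 0x0  -> 0x00000
    + ((data[2] & 0x3F) << 10)  # 0xBD & 0x3F = 0x3D -> 0x0F400
    + ((data[4] & 0x0F) << 6)   # 0xB8 & 0x0F = 0x8  -> 0x00200
    + (data[5] & 0x3F)          # 0x80 & 0x3F = 0x0
    + 0x10000
)
assert chr(codepoint) == "\U0001F600"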
|
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/utf8_variants.py
|
StreamWriter.encode
|
(input: str, errors: str = "strict")
|
return IncrementalEncoder(errors).encode(input, final=True), len(input)
| 227 | 228 |
def encode(input: str, errors: str = "strict") -> Tuple[bytes, int]:
return IncrementalEncoder(errors).encode(input, final=True), len(input)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/utf8_variants.py#L227-L228
| 44 |
[
0
] | 50 |
[
1
] | 50 | false | 89.655172 | 2 | 1 | 50 | 0 |
def encode(input: str, errors: str = "strict") -> Tuple[bytes, int]:
return IncrementalEncoder(errors).encode(input, final=True), len(input)
| 28,120 |
||
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/utf8_variants.py
|
StreamReader.decode
|
(input: bytes, errors: str = "strict")
|
return IncrementalDecoder(errors).decode(input, final=True), len(input)
| 233 | 234 |
def decode(input: bytes, errors: str = "strict") -> Tuple[str, int]:
return IncrementalDecoder(errors).decode(input, final=True), len(input)
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/utf8_variants.py#L233-L234
| 44 |
[
0,
1
] | 100 |
[] | 0 | true | 89.655172 | 2 | 1 | 100 | 0 |
def decode(input: bytes, errors: str = "strict") -> Tuple[str, int]:
return IncrementalDecoder(errors).decode(input, final=True), len(input)
| 28,121 |
||
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/__init__.py
|
search_function
|
(encoding)
|
return codec
|
Register our "bad codecs" with Python's codecs API. This involves adding
a search function that takes in an encoding name, and returns a codec
for that encoding if it knows one, or None if it doesn't.
The encodings this will match are:
- Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N',
where the non-sloppy version is an encoding that leaves some bytes
unmapped to characters.
- The 'utf-8-variants' encoding, which has the several aliases seen
above.
|
Register our "bad codecs" with Python's codecs API. This involves adding
a search function that takes in an encoding name, and returns a codec
for that encoding if it knows one, or None if it doesn't.
| 54 | 85 |
def search_function(encoding):
"""
Register our "bad codecs" with Python's codecs API. This involves adding
a search function that takes in an encoding name, and returns a codec
for that encoding if it knows one, or None if it doesn't.
The encodings this will match are:
- Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N',
where the non-sloppy version is an encoding that leaves some bytes
unmapped to characters.
- The 'utf-8-variants' encoding, which has the several aliases seen
above.
"""
if encoding in _CACHE:
return _CACHE[encoding]
norm_encoding = normalize_encoding(encoding)
codec = None
if norm_encoding in UTF8_VAR_NAMES:
from ftfy.bad_codecs.utf8_variants import CODEC_INFO
codec = CODEC_INFO
elif norm_encoding.startswith("sloppy_"):
from ftfy.bad_codecs.sloppy import CODECS
codec = CODECS.get(norm_encoding)
if codec is not None:
_CACHE[encoding] = codec
return codec
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/__init__.py#L54-L85
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31
] | 100 |
[] | 0 | true | 100 | 32 | 5 | 100 | 11 |
def search_function(encoding):
if encoding in _CACHE:
return _CACHE[encoding]
norm_encoding = normalize_encoding(encoding)
codec = None
if norm_encoding in UTF8_VAR_NAMES:
from ftfy.bad_codecs.utf8_variants import CODEC_INFO
codec = CODEC_INFO
elif norm_encoding.startswith("sloppy_"):
from ftfy.bad_codecs.sloppy import CODECS
codec = CODECS.get(norm_encoding)
if codec is not None:
_CACHE[encoding] = codec
return codec
| 28,122 |
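Behavior sketch for the search function (the package passes it to codecs.register() itself; the cache means repeated lookups return the identical CodecInfo object):

from ftfy.bad_codecs import search_function

info = search_function("sloppy-windows-1252")
assert info is not None                                # known name
assert search_function("sloppy-windows-1252") is info  # served from _CACHE
assert search_function("utf-8") is None                # defer to stdlib codecs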
rspeer/python-ftfy
|
fbb4570df343757c36ac2a86142c3cc5aaea2746
|
ftfy/bad_codecs/__init__.py
|
ok
|
()
|
A feel-good function that gives you something to call after importing
this package.
Why is this here? Pyflakes. Pyflakes gets upset when you import a module
and appear not to use it. It doesn't know that you're using it when
you use the ``unicode.encode`` and ``bytes.decode`` methods with certain
encodings.
|
A feel-good function that gives you something to call after importing
this package.
| 88 | 97 |
def ok():
"""
A feel-good function that gives you something to call after importing
this package.
Why is this here? Pyflakes. Pyflakes gets upset when you import a module
and appear not to use it. It doesn't know that you're using it when
you use the ``unicode.encode`` and ``bytes.decode`` methods with certain
encodings.
"""
|
https://github.com/rspeer/python-ftfy/blob/fbb4570df343757c36ac2a86142c3cc5aaea2746/project44/ftfy/bad_codecs/__init__.py#L88-L97
| 44 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 100 |
[] | 0 | true | 100 | 10 | 1 | 100 | 7 |
def ok():
| 28,123 |
|
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/omdict1D.py
|
omdict1D.add
|
(self, key, value)
|
return self
| 49 | 60 |
def add(self, key, value):
if not is_iterable_but_not_string(value):
value = [value]
if value:
self._map.setdefault(key, list())
for val in value:
node = self._items.append(key, val)
self._map[key].append(node)
return self
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/omdict1D.py#L49-L60
| 45 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11
] | 100 |
[] | 0 | true | 100 | 12 | 4 | 100 | 0 |
def add(self, key, value):
if not is_iterable_but_not_string(value):
value = [value]
if value:
self._map.setdefault(key, list())
for val in value:
node = self._items.append(key, val)
self._map[key].append(node)
return self
| 28,125 |
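Sketch of the one-dimensional convention: a list value means "several values for this key", a scalar means one, and add() returns self so calls chain:

from furl.omdict1D import omdict1D

d = omdict1D()
d.add("a", 1).add("a", [2, 3])
assert d.getlist("a") == [1, 2, 3]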
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/omdict1D.py
|
omdict1D.set
|
(self, key, value)
|
return self._set(key, value)
| 62 | 63 |
def set(self, key, value):
return self._set(key, value)
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/omdict1D.py#L62-L63
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 100 | 2 | 1 | 100 | 0 |
def set(self, key, value):
return self._set(key, value)
| 28,126 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/omdict1D.py
|
omdict1D.__setitem__
|
(self, key, value)
|
return self._set(key, value)
| 65 | 66 |
def __setitem__(self, key, value):
return self._set(key, value)
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/omdict1D.py#L65-L66
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 100 | 2 | 1 | 100 | 0 |
def __setitem__(self, key, value):
return self._set(key, value)
| 28,127 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/omdict1D.py
|
omdict1D._bin_update_items
|
(self, items, replace_at_most_one,
replacements, leftovers)
|
Subclassed from omdict._bin_update_items() to make update() and
updateall() process lists of values as multiple values.
<replacements> and <leftovers> are modified directly, ala pass by
reference.
|
Subclassed from omdict._bin_update_items() to make update() and
updateall() process lists of values as multiple values.
| 68 | 105 |
def _bin_update_items(self, items, replace_at_most_one,
replacements, leftovers):
"""
Subclassed from omdict._bin_update_items() to make update() and
updateall() process lists of values as multiple values.
<replacements> and <leftovers> are modified directly, ala pass by
reference.
"""
for key, values in items:
# <values> is not a list or an empty list.
like_list_not_str = is_iterable_but_not_string(values)
if not like_list_not_str or (like_list_not_str and not values):
values = [values]
for value in values:
# If the value is [], remove any existing leftovers with
# key <key> and set the list of values itself to [],
# which in turn will later delete <key> when [] is
# passed to omdict.setlist() in
# omdict._update_updateall().
if value == []:
replacements[key] = []
leftovers[:] = [lst for lst in leftovers if key != lst[0]]
# If there are existing items with key <key> that have
# yet to be marked for replacement, mark that item's
# value to be replaced by <value> by appending it to
# <replacements>.
elif (key in self and
replacements.get(key, _absent) in [[], _absent]):
replacements[key] = [value]
elif (key in self and not replace_at_most_one and
len(replacements[key]) < len(self.values(key))):
replacements[key].append(value)
elif replace_at_most_one:
replacements[key] = [value]
else:
leftovers.append((key, value))
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/omdict1D.py#L68-L105
| 45 |
[
0,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37
] | 81.578947 |
[] | 0 | false | 100 | 38 | 14 | 100 | 5 |
def _bin_update_items(self, items, replace_at_most_one,
replacements, leftovers):
for key, values in items:
# <values> is not a list or an empty list.
like_list_not_str = is_iterable_but_not_string(values)
if not like_list_not_str or (like_list_not_str and not values):
values = [values]
for value in values:
# If the value is [], remove any existing leftovers with
# key <key> and set the list of values itself to [],
# which in turn will later delete <key> when [] is
# passed to omdict.setlist() in
# omdict._update_updateall().
if value == []:
replacements[key] = []
leftovers[:] = [lst for lst in leftovers if key != lst[0]]
# If there are existing items with key <key> that have
# yet to be marked for replacement, mark that item's
# value to be replaced by <value> by appending it to
# <replacements>.
elif (key in self and
replacements.get(key, _absent) in [[], _absent]):
replacements[key] = [value]
elif (key in self and not replace_at_most_one and
len(replacements[key]) < len(self.values(key))):
replacements[key].append(value)
elif replace_at_most_one:
replacements[key] = [value]
else:
leftovers.append((key, value))
| 28,128 |
|
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/omdict1D.py
|
omdict1D._set
|
(self, key, value)
|
return self
| 107 | 112 |
def _set(self, key, value):
if not is_iterable_but_not_string(value):
value = [value]
self.setlist(key, value)
return self
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/omdict1D.py#L107-L112
| 45 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 100 | 6 | 2 | 100 | 0 |
def _set(self, key, value):
if not is_iterable_but_not_string(value):
value = [value]
self.setlist(key, value)
return self
| 28,129 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/common.py
|
callable_attr
|
(obj, attr)
|
return hasattr(obj, attr) and callable(getattr(obj, attr))
| 19 | 20 |
def callable_attr(obj, attr):
return hasattr(obj, attr) and callable(getattr(obj, attr))
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/common.py#L19-L20
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 100 | 2 | 2 | 100 | 0 |
def callable_attr(obj, attr):
return hasattr(obj, attr) and callable(getattr(obj, attr))
| 28,130 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/common.py
|
is_iterable_but_not_string
|
(v)
|
return callable_attr(v, '__iter__') and not isinstance(v, string_types)
| 23 | 24 |
def is_iterable_but_not_string(v):
return callable_attr(v, '__iter__') and not isinstance(v, string_types)
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/common.py#L23-L24
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 100 | 2 | 2 | 100 | 0 |
def is_iterable_but_not_string(v):
return callable_attr(v, '__iter__') and not isinstance(v, string_types)
| 28,131 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
lget
|
(lst, index, default=None)
| 92 | 96 |
def lget(lst, index, default=None):
try:
return lst[index]
except IndexError:
return default
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L92-L96
| 45 |
[
0,
1,
2,
3,
4
] | 100 |
[] | 0 | true | 96.258065 | 5 | 2 | 100 | 0 |
def lget(lst, index, default=None):
try:
return lst[index]
except IndexError:
return default
| 28,132 |
|||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
attemptstr
|
(o)
| 99 | 103 |
def attemptstr(o):
try:
return str(o)
except Exception:
return o
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L99-L103
| 45 |
[
0,
1,
2
] | 60 |
[
3,
4
] | 40 | false | 96.258065 | 5 | 2 | 60 | 0 |
def attemptstr(o):
try:
return str(o)
except Exception:
return o
| 28,133 |
|||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
utf8
|
(o, default=_absent)
| 106 | 110 |
def utf8(o, default=_absent):
try:
return o.encode('utf8')
except Exception:
return o if default is _absent else default
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L106-L110
| 45 |
[
0,
1,
2,
3,
4
] | 100 |
[] | 0 | true | 96.258065 | 5 | 2 | 100 | 0 |
def utf8(o, default=_absent):
try:
return o.encode('utf8')
except Exception:
return o if default is _absent else default
| 28,134 |
|||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
non_string_iterable
|
(o)
|
return callable_attr(o, '__iter__') and not isinstance(o, string_types)
| 113 | 114 |
def non_string_iterable(o):
return callable_attr(o, '__iter__') and not isinstance(o, string_types)
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L113-L114
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 96.258065 | 2 | 2 | 100 | 0 |
def non_string_iterable(o):
return callable_attr(o, '__iter__') and not isinstance(o, string_types)
| 28,135 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
idna_encode
|
(o)
|
return o
| 119 | 122 |
def idna_encode(o):
if callable_attr(o, 'encode'):
return str(o.encode('idna').decode('utf8'))
return o
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L119-L122
| 45 |
[
0,
1,
2,
3
] | 100 |
[] | 0 | true | 96.258065 | 4 | 2 | 100 | 0 |
def idna_encode(o):
if callable_attr(o, 'encode'):
return str(o.encode('idna').decode('utf8'))
return o
| 28,136 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
idna_decode
|
(o)
|
return o
| 125 | 128 |
def idna_decode(o):
if callable_attr(utf8(o), 'decode'):
return utf8(o).decode('idna')
return o
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L125-L128
| 45 |
[
0,
1,
2
] | 75 |
[
3
] | 25 | false | 96.258065 | 4 | 2 | 75 | 0 |
def idna_decode(o):
if callable_attr(utf8(o), 'decode'):
return utf8(o).decode('idna')
return o
| 28,137 |
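Usage sketch for the two IDNA helpers above (the Punycode work is done by Python's built-in 'idna' codec; non-string inputs pass through untouched):

from furl.furl import idna_decode, idna_encode

assert idna_encode("bücher.de") == "xn--bcher-kva.de"
assert idna_decode("xn--bcher-kva.de") == "bücher.de"
assert idna_encode(None) is None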
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
is_valid_port
|
(port)
|
return True
| 131 | 135 |
def is_valid_port(port):
port = str(port)
if not port.isdigit() or not 0 < int(port) <= 65535:
return False
return True
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L131-L135
| 45 |
[
0,
1,
2,
3,
4
] | 100 |
[] | 0 | true | 96.258065 | 5 | 3 | 100 | 0 |
def is_valid_port(port):
port = str(port)
if not port.isdigit() or not 0 < int(port) <= 65535:
return False
return True
| 28,138 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
static_vars
|
(**kwargs)
|
return decorator
| 138 | 143 |
def static_vars(**kwargs):
def decorator(func):
for key, value in six.iteritems(kwargs):
setattr(func, key, value)
return func
return decorator
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L138-L143
| 45 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 96.258065 | 6 | 3 | 100 | 0 |
def static_vars(**kwargs):
def decorator(func):
for key, value in six.iteritems(kwargs):
setattr(func, key, value)
return func
return decorator
| 28,139 |
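Sketch of the decorator in use, mirroring how the is_valid_* helpers below attach a precompiled regex to the function object itself (assumes static_vars and six are in scope, as in the module above):

import re

@static_vars(regex=re.compile(r"^\d+$"))
def all_digits(s):
    return all_digits.regex.match(s) is not None

assert all_digits("42") and not all_digits("4x")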
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
create_quote_fn
|
(safe_charset, quote_plus)
|
return quote_fn
| 146 | 164 |
def create_quote_fn(safe_charset, quote_plus):
def quote_fn(s, dont_quote):
if dont_quote is True:
safe = safe_charset
elif dont_quote is False:
safe = ''
else: # <dont_quote> is expected to be a string.
safe = dont_quote
# Prune duplicates and characters not in <safe_charset>.
safe = ''.join(set(safe) & set(safe_charset)) # E.g. '?^#?' -> '?'.
quoted = quote(s, safe)
if quote_plus:
quoted = quoted.replace('%20', '+')
return quoted
return quote_fn
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L146-L164
| 45 |
[
0,
1,
2,
3,
4,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18
] | 89.473684 |
[
5
] | 5.263158 | false | 96.258065 | 19 | 5 | 94.736842 | 0 |
def create_quote_fn(safe_charset, quote_plus):
def quote_fn(s, dont_quote):
if dont_quote is True:
safe = safe_charset
elif dont_quote is False:
safe = ''
else: # <dont_quote> is expected to be a string.
safe = dont_quote
# Prune duplicates and characters not in <safe_charset>.
safe = ''.join(set(safe) & set(safe_charset)) # E.g. '?^#?' -> '?'.
quoted = quote(s, safe)
if quote_plus:
quoted = quoted.replace('%20', '+')
return quoted
return quote_fn
| 28,140 |
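Usage sketch (quote here is urllib.parse.quote, which furl imports; the names and charsets below are illustrative):

path_quote = create_quote_fn(safe_charset="/?", quote_plus=False)
assert path_quote("a b/c", dont_quote=True) == "a%20b/c"     # '/' stays safe
assert path_quote("a b/c", dont_quote=False) == "a%20b%2Fc"  # quote everything

query_quote = create_quote_fn(safe_charset="?", quote_plus=True)
assert query_quote("a b?", dont_quote=True) == "a+b?"        # '%20' -> '+'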
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
is_valid_encoded_path_segment
|
(segment)
|
return is_valid_encoded_path_segment.regex.match(segment) is not None
| 210 | 211 |
def is_valid_encoded_path_segment(segment):
return is_valid_encoded_path_segment.regex.match(segment) is not None
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L210-L211
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 96.258065 | 2 | 1 | 100 | 0 |
def is_valid_encoded_path_segment(segment):
return is_valid_encoded_path_segment.regex.match(segment) is not None
| 28,141 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
is_valid_encoded_query_key
|
(key)
|
return is_valid_encoded_query_key.regex.match(key) is not None
| 216 | 217 |
def is_valid_encoded_query_key(key):
return is_valid_encoded_query_key.regex.match(key) is not None
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L216-L217
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 96.258065 | 2 | 1 | 100 | 0 |
def is_valid_encoded_query_key(key):
return is_valid_encoded_query_key.regex.match(key) is not None
| 28,142 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
is_valid_encoded_query_value
|
(value)
|
return is_valid_encoded_query_value.regex.match(value) is not None
| 222 | 223 |
def is_valid_encoded_query_value(value):
return is_valid_encoded_query_value.regex.match(value) is not None
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L222-L223
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 96.258065 | 2 | 1 | 100 | 0 |
def is_valid_encoded_query_value(value):
return is_valid_encoded_query_value.regex.match(value) is not None
| 28,143 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
is_valid_scheme
|
(scheme)
|
return is_valid_scheme.regex.match(scheme) is not None
| 227 | 228 |
def is_valid_scheme(scheme):
return is_valid_scheme.regex.match(scheme) is not None
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L227-L228
| 45 |
[
0,
1
] | 100 |
[] | 0 | true | 96.258065 | 2 | 1 | 100 | 0 |
def is_valid_scheme(scheme):
return is_valid_scheme.regex.match(scheme) is not None
| 28,144 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
is_valid_host
|
(hostname)
|
return '' not in toks
| 232 | 241 |
def is_valid_host(hostname):
toks = hostname.split('.')
if toks[-1] == '': # Trailing '.' in a fully qualified domain name.
toks.pop()
for tok in toks:
if is_valid_host.regex.search(tok) is not None:
return False
return '' not in toks
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L232-L241
| 45 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9
] | 100 |
[] | 0 | true | 96.258065 | 10 | 4 | 100 | 0 |
def is_valid_host(hostname):
toks = hostname.split('.')
if toks[-1] == '': # Trailing '.' in a fully qualified domain name.
toks.pop()
for tok in toks:
if is_valid_host.regex.search(tok) is not None:
return False
return '' not in toks
| 28,145 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
get_scheme
|
(url)
|
return scheme
| 244 | 259 |
def get_scheme(url):
if url.startswith(':'):
return ''
# Avoid incorrect scheme extraction with url.find(':') when other URL
# components, like the path, query, fragment, etc, may have a colon in
# them. For example, the URL 'a?query:', whose query has a ':' in it.
no_fragment = url.split('#', 1)[0]
no_query = no_fragment.split('?', 1)[0]
no_path_or_netloc = no_query.split('/', 1)[0]
scheme = url[:max(0, no_path_or_netloc.find(':'))] or None
if scheme is not None and not is_valid_scheme(scheme):
return None
return scheme
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L244-L259
| 45 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15
] | 100 |
[] | 0 | true | 96.258065 | 16 | 5 | 100 | 0 |
def get_scheme(url):
if url.startswith(':'):
return ''
# Avoid incorrect scheme extraction with url.find(':') when other URL
# components, like the path, query, fragment, etc, may have a colon in
# them. For example, the URL 'a?query:', whose query has a ':' in it.
no_fragment = url.split('#', 1)[0]
no_query = no_fragment.split('?', 1)[0]
no_path_or_netloc = no_query.split('/', 1)[0]
scheme = url[:max(0, no_path_or_netloc.find(':'))] or None
if scheme is not None and not is_valid_scheme(scheme):
return None
return scheme
| 28,146 |
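Behavior sketch: the colon must appear before any path, query, or fragment, and '' versus None distinguishes an empty scheme from no scheme at all:

from furl.furl import get_scheme

assert get_scheme("http://example.com/a:b") == "http"
assert get_scheme("mailto:x@y") == "mailto"
assert get_scheme("a?query:") is None   # the ':' lives in the query
assert get_scheme(":relative") == ""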
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
strip_scheme
|
(url)
|
return url
| 262 | 267 |
def strip_scheme(url):
scheme = get_scheme(url) or ''
url = url[len(scheme):]
if url.startswith(':'):
url = url[1:]
return url
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L262-L267
| 45 |
[
0,
1,
2,
3,
4,
5
] | 100 |
[] | 0 | true | 96.258065 | 6 | 3 | 100 | 0 |
def strip_scheme(url):
scheme = get_scheme(url) or ''
url = url[len(scheme):]
if url.startswith(':'):
url = url[1:]
return url
| 28,147 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
set_scheme
|
(url, scheme)
| 270 | 275 |
def set_scheme(url, scheme):
after_scheme = strip_scheme(url)
if scheme is None:
return after_scheme
else:
return '%s:%s' % (scheme, after_scheme)
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L270-L275
| 45 |
[
0,
1,
2,
5
] | 66.666667 |
[
3
] | 16.666667 | false | 96.258065 | 6 | 2 | 83.333333 | 0 |
def set_scheme(url, scheme):
after_scheme = strip_scheme(url)
if scheme is None:
return after_scheme
else:
return '%s:%s' % (scheme, after_scheme)
| 28,148 |
|||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
has_netloc
|
(url)
|
return url.startswith('//' if scheme is None else scheme + '://')
| 279 | 281 |
def has_netloc(url):
scheme = get_scheme(url)
return url.startswith('//' if scheme is None else scheme + '://')
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L279-L281
| 45 |
[
0,
1,
2
] | 100 |
[] | 0 | true | 96.258065 | 3 | 1 | 100 | 0 |
def has_netloc(url):
scheme = get_scheme(url)
return url.startswith('//' if scheme is None else scheme + '://')
| 28,149 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
urlsplit
|
(url)
|
return urllib.parse.SplitResult(scheme, netloc, path, query, fragment)
|
Parameters:
url: URL string to split.
Returns: urlparse.SplitResult tuple subclass, just like
urlparse.urlsplit() returns, with fields (scheme, netloc, path,
query, fragment, username, password, hostname, port). See
http://docs.python.org/library/urlparse.html#urlparse.urlsplit
for more details on urlsplit().
|
Parameters:
url: URL string to split.
Returns: urlparse.SplitResult tuple subclass, just like
urlparse.urlsplit() returns, with fields (scheme, netloc, path,
query, fragment, username, password, hostname, port). See
http://docs.python.org/library/urlparse.html#urlparse.urlsplit
for more details on urlsplit().
| 284 | 324 |
def urlsplit(url):
"""
Parameters:
url: URL string to split.
Returns: urlparse.SplitResult tuple subclass, just like
urlparse.urlsplit() returns, with fields (scheme, netloc, path,
query, fragment, username, password, hostname, port). See
http://docs.python.org/library/urlparse.html#urlparse.urlsplit
for more details on urlsplit().
"""
original_scheme = get_scheme(url)
# urlsplit() parses URLs differently depending on whether or not the URL's
# scheme is in any of
#
# urllib.parse.uses_fragment
# urllib.parse.uses_netloc
# urllib.parse.uses_params
# urllib.parse.uses_query
# urllib.parse.uses_relative
#
# For consistent URL parsing, switch the URL's scheme to 'http', a scheme
# in all of the aforementioned uses_* lists, and afterwards revert to the
    # original scheme (which may or may not be in some, or all, of the
# uses_* lists).
if original_scheme is not None:
url = set_scheme(url, 'http')
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
# Detect and preserve the '//' before the netloc, if present. E.g. preserve
# URLs like 'http:', 'http://', and '///sup' correctly.
after_scheme = strip_scheme(url)
if after_scheme.startswith('//'):
netloc = netloc or ''
else:
netloc = None
scheme = original_scheme
return urllib.parse.SplitResult(scheme, netloc, path, query, fragment)
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L284-L324
| 45 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40
] | 100 |
[] | 0 | true | 96.258065 | 41 | 4 | 100 | 7 |
def urlsplit(url):
original_scheme = get_scheme(url)
# urlsplit() parses URLs differently depending on whether or not the URL's
# scheme is in any of
#
# urllib.parse.uses_fragment
# urllib.parse.uses_netloc
# urllib.parse.uses_params
# urllib.parse.uses_query
# urllib.parse.uses_relative
#
# For consistent URL parsing, switch the URL's scheme to 'http', a scheme
# in all of the aforementioned uses_* lists, and afterwards revert to the
    # original scheme (which may or may not be in some, or all, of the
# uses_* lists).
if original_scheme is not None:
url = set_scheme(url, 'http')
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
# Detect and preserve the '//' before the netloc, if present. E.g. preserve
# URLs like 'http:', 'http://', and '///sup' correctly.
after_scheme = strip_scheme(url)
if after_scheme.startswith('//'):
netloc = netloc or ''
else:
netloc = None
scheme = original_scheme
return urllib.parse.SplitResult(scheme, netloc, path, query, fragment)
| 28,150 |
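Behavior sketch: unlike the stdlib, netloc is None (not '') when the URL has no '//', and the original non-http scheme is restored after parsing:

from furl.furl import urlsplit

sr = urlsplit("asdf://host/path?q=1#f")
assert (sr.scheme, sr.netloc, sr.path, sr.query, sr.fragment) == (
    "asdf", "host", "/path", "q=1", "f")
assert urlsplit("sup").netloc is None        # no '//' -> no netloc
assert urlsplit("//host").netloc == "host"   # netloc without any scheme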
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
urljoin
|
(base, url)
|
return joined
|
Parameters:
base: Base URL to join with <url>.
url: Relative or absolute URL to join with <base>.
Returns: The resultant URL from joining <base> and <url>.
|
Parameters:
base: Base URL to join with <url>.
url: Relative or absolute URL to join with <base>.
| 327 | 360 |
def urljoin(base, url):
"""
Parameters:
base: Base URL to join with <url>.
url: Relative or absolute URL to join with <base>.
Returns: The resultant URL from joining <base> and <url>.
"""
base_scheme = get_scheme(base) if has_netloc(base) else None
url_scheme = get_scheme(url) if has_netloc(url) else None
if base_scheme is not None:
# For consistent URL joining, switch the base URL's scheme to
# 'http'. urllib.parse.urljoin() behaves differently depending on the
# scheme. E.g.
#
# >>> urllib.parse.urljoin('http://google.com/', 'hi')
# 'http://google.com/hi'
#
# vs
#
# >>> urllib.parse.urljoin('asdf://google.com/', 'hi')
# 'hi'
root = set_scheme(base, 'http')
else:
root = base
joined = urllib.parse.urljoin(root, url)
new_scheme = url_scheme if url_scheme is not None else base_scheme
if new_scheme is not None and has_netloc(joined):
joined = set_scheme(joined, new_scheme)
return joined
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L327-L360
| 45 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33
] | 100 |
[] | 0 | true | 96.258065 | 34 | 4 | 100 | 5 |
def urljoin(base, url):
base_scheme = get_scheme(base) if has_netloc(base) else None
url_scheme = get_scheme(url) if has_netloc(url) else None
if base_scheme is not None:
# For consistent URL joining, switch the base URL's scheme to
# 'http'. urllib.parse.urljoin() behaves differently depending on the
# scheme. E.g.
#
# >>> urllib.parse.urljoin('http://google.com/', 'hi')
# 'http://google.com/hi'
#
# vs
#
# >>> urllib.parse.urljoin('asdf://google.com/', 'hi')
# 'hi'
root = set_scheme(base, 'http')
else:
root = base
joined = urllib.parse.urljoin(root, url)
new_scheme = url_scheme if url_scheme is not None else base_scheme
if new_scheme is not None and has_netloc(joined):
joined = set_scheme(joined, new_scheme)
return joined
| 28,151 |
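Behavior sketch: joins keep working for schemes outside the stdlib's uses_* lists, which a raw urllib.parse.urljoin() call mishandles (see the comment block above):

from furl.furl import urljoin

assert urljoin("asdf://google.com/", "hi") == "asdf://google.com/hi"
assert urljoin("http://a/b/c", "../d") == "http://a/d"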
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
join_path_segments
|
(*args)
|
return finals
|
Join multiple lists of path segments together, intelligently
handling path segments borders to preserve intended slashes of the
final constructed path.
This function is not encoding aware. It doesn't test for, or change,
the encoding of path segments it is passed.
Examples:
join_path_segments(['a'], ['b']) == ['a','b']
join_path_segments(['a',''], ['b']) == ['a','b']
join_path_segments(['a'], ['','b']) == ['a','b']
join_path_segments(['a',''], ['','b']) == ['a','','b']
join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']
Returns: A list containing the joined path segments.
|
Join multiple lists of path segments together, intelligently
handling path segments borders to preserve intended slashes of the
final constructed path.
| 363 | 398 |
def join_path_segments(*args):
"""
Join multiple lists of path segments together, intelligently
handling path segments borders to preserve intended slashes of the
final constructed path.
This function is not encoding aware. It doesn't test for, or change,
the encoding of path segments it is passed.
Examples:
join_path_segments(['a'], ['b']) == ['a','b']
join_path_segments(['a',''], ['b']) == ['a','b']
join_path_segments(['a'], ['','b']) == ['a','b']
join_path_segments(['a',''], ['','b']) == ['a','','b']
join_path_segments(['a','b'], ['c','d']) == ['a','b','c','d']
Returns: A list containing the joined path segments.
"""
finals = []
for segments in args:
if not segments or segments == ['']:
continue
elif not finals:
finals.extend(segments)
else:
# Example #1: ['a',''] + ['b'] == ['a','b']
# Example #2: ['a',''] + ['','b'] == ['a','','b']
if finals[-1] == '' and (segments[0] != '' or len(segments) > 1):
finals.pop(-1)
# Example: ['a'] + ['','b'] == ['a','b']
elif finals[-1] != '' and segments[0] == '' and len(segments) > 1:
segments = segments[1:]
finals.extend(segments)
return finals
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L363-L398
| 45 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35
] | 100 |
[] | 0 | true | 96.258065 | 36 | 11 | 100 | 15 |
def join_path_segments(*args):
finals = []
for segments in args:
if not segments or segments == ['']:
continue
elif not finals:
finals.extend(segments)
else:
# Example #1: ['a',''] + ['b'] == ['a','b']
# Example #2: ['a',''] + ['','b'] == ['a','','b']
if finals[-1] == '' and (segments[0] != '' or len(segments) > 1):
finals.pop(-1)
# Example: ['a'] + ['','b'] == ['a','b']
elif finals[-1] != '' and segments[0] == '' and len(segments) > 1:
segments = segments[1:]
finals.extend(segments)
return finals
| 28,152 |
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
remove_path_segments
|
(segments, remove)
|
return ret
|
Removes the path segments of <remove> from the end of the path
segments <segments>.
Examples:
# ('/a/b/c', 'b/c') -> '/a/'
remove_path_segments(['','a','b','c'], ['b','c']) == ['','a','']
# ('/a/b/c', '/b/c') -> '/a'
remove_path_segments(['','a','b','c'], ['','b','c']) == ['','a']
Returns: The list of all remaining path segments after the segments
in <remove> have been removed from the end of <segments>. If no
segments from <remove> were removed from <segments>, <segments> is
returned unmodified.
|
Removes the path segments of <remove> from the end of the path
segments <segments>.
| 401 | 441 |
def remove_path_segments(segments, remove):
"""
Removes the path segments of <remove> from the end of the path
segments <segments>.
Examples:
# ('/a/b/c', 'b/c') -> '/a/'
remove_path_segments(['','a','b','c'], ['b','c']) == ['','a','']
# ('/a/b/c', '/b/c') -> '/a'
remove_path_segments(['','a','b','c'], ['','b','c']) == ['','a']
Returns: The list of all remaining path segments after the segments
in <remove> have been removed from the end of <segments>. If no
segments from <remove> were removed from <segments>, <segments> is
returned unmodified.
"""
# [''] means a '/', which is properly represented by ['', ''].
if segments == ['']:
segments.append('')
if remove == ['']:
remove.append('')
ret = None
if remove == segments:
ret = []
elif len(remove) > len(segments):
ret = segments
else:
toremove = list(remove)
if len(remove) > 1 and remove[0] == '':
toremove.pop(0)
if toremove and toremove == segments[-1 * len(toremove):]:
ret = segments[:len(segments) - len(toremove)]
if remove[0] != '' and ret:
ret.append('')
else:
ret = segments
return ret
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L401-L441
| 45 |
[
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16,
17,
18,
19,
20,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
31,
32,
33,
34,
35,
36,
37,
38,
39,
40
] | 100 |
[] | 0 | true | 96.258065 | 41 | 11 | 100 | 13 |
def remove_path_segments(segments, remove):
# [''] means a '/', which is properly represented by ['', ''].
if segments == ['']:
segments.append('')
if remove == ['']:
remove.append('')
ret = None
if remove == segments:
ret = []
elif len(remove) > len(segments):
ret = segments
else:
toremove = list(remove)
if len(remove) > 1 and remove[0] == '':
toremove.pop(0)
if toremove and toremove == segments[-1 * len(toremove):]:
ret = segments[:len(segments) - len(toremove)]
if remove[0] != '' and ret:
ret.append('')
else:
ret = segments
return ret
| 28,153 |
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
quacks_like_a_path_with_segments
|
(obj)
|
return (
hasattr(obj, 'segments') and
is_iterable_but_not_string(obj.segments))
| 444 | 447 |
def quacks_like_a_path_with_segments(obj):
return (
hasattr(obj, 'segments') and
is_iterable_but_not_string(obj.segments))
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L444-L447
| 45 |
[
0,
1
] | 50 |
[] | 0 | false | 96.258065 | 4 | 2 | 100 | 0 |
def quacks_like_a_path_with_segments(obj):
return (
hasattr(obj, 'segments') and
is_iterable_but_not_string(obj.segments))
| 28,154 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.__init__
|
(self, path='', force_absolute=lambda _: False, strict=False)
| 491 | 498 |
def __init__(self, path='', force_absolute=lambda _: False, strict=False):
self.segments = []
self.strict = strict
self._isabsolute = False
self._force_absolute = force_absolute
self.load(path)
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L491-L498
| 45 |
[0, 1, 2, 3, 4, 5, 6, 7] | 100 |
[] | 0 | true | 96.258065 | 8 | 1 | 100 | 0 |
def __init__(self, path='', force_absolute=lambda _: False, strict=False):
self.segments = []
self.strict = strict
self._isabsolute = False
self._force_absolute = force_absolute
self.load(path)
| 28,155 |
|||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.load
|
(self, path)
|
return self
|
Load <path>, replacing any existing path. <path> can either be
a Path instance, a list of segments, or a path string to adopt.
Returns: <self>.
|
Load <path>, replacing any existing path. <path> can either be
a Path instance, a list of segments, or a path string to adopt.
| 500 | 526 |
def load(self, path):
"""
Load <path>, replacing any existing path. <path> can either be
a Path instance, a list of segments, or a path string to adopt.
Returns: <self>.
"""
if not path:
segments = []
elif quacks_like_a_path_with_segments(path): # Path interface.
segments = path.segments
elif is_iterable_but_not_string(path): # List interface.
segments = path
else: # String interface.
segments = self._segments_from_path(path)
if self._force_absolute(self):
self._isabsolute = True if segments else False
else:
self._isabsolute = (segments and segments[0] == '')
if self.isabsolute and len(segments) > 1 and segments[0] == '':
segments.pop(0)
self.segments = segments
return self
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L500-L526
| 45 |
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] | 100 |
[] | 0 | true | 96.258065 | 27 | 9 | 100 | 4 |
def load(self, path):
if not path:
segments = []
elif quacks_like_a_path_with_segments(path): # Path interface.
segments = path.segments
elif is_iterable_but_not_string(path): # List interface.
segments = path
else: # String interface.
segments = self._segments_from_path(path)
if self._force_absolute(self):
self._isabsolute = True if segments else False
else:
self._isabsolute = (segments and segments[0] == '')
if self.isabsolute and len(segments) > 1 and segments[0] == '':
segments.pop(0)
self.segments = segments
return self
| 28,156 |
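A short sketch of the string and list interfaces load() accepts; Path is assumed importable from furl.furl, and the expected strings assume the usual Path string rendering (leading '/' for absolute paths):

p = Path('/a/b/c')
p.load('x/y')          # string interface; replaces the old path
str(p)                 # 'x/y'
p.load(['', 'a', ''])  # list interface; a leading '' marks an absolute path
str(p)                 # '/a/'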
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.add
|
(self, path)
|
return self
|
Add <path> to the existing path. <path> can either be a Path instance,
a list of segments, or a path string to append to the existing path.
Returns: <self>.
|
Add <path> to the existing path. <path> can either be a Path instance,
a list of segments, or a path string to append to the existing path.
| 528 | 553 |
def add(self, path):
"""
Add <path> to the existing path. <path> can either be a Path instance,
a list of segments, or a path string to append to the existing path.
Returns: <self>.
"""
if quacks_like_a_path_with_segments(path): # Path interface.
newsegments = path.segments
elif is_iterable_but_not_string(path): # List interface.
newsegments = path
else: # String interface.
newsegments = self._segments_from_path(path)
# Preserve the opening '/' if one exists already (self.segments
# == ['']).
if self.segments == [''] and newsegments and newsegments[0] != '':
newsegments.insert(0, '')
segments = self.segments
if self.isabsolute and self.segments and self.segments[0] != '':
segments.insert(0, '')
self.load(join_path_segments(segments, newsegments))
return self
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L528-L553
| 45 |
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25] | 100 |
[] | 0 | true | 96.258065 | 26 | 9 | 100 | 4 |
def add(self, path):
if quacks_like_a_path_with_segments(path): # Path interface.
newsegments = path.segments
elif is_iterable_but_not_string(path): # List interface.
newsegments = path
else: # String interface.
newsegments = self._segments_from_path(path)
# Preserve the opening '/' if one exists already (self.segments
# == ['']).
if self.segments == [''] and newsegments and newsegments[0] != '':
newsegments.insert(0, '')
segments = self.segments
if self.isabsolute and self.segments and self.segments[0] != '':
segments.insert(0, '')
self.load(join_path_segments(segments, newsegments))
return self
| 28,157 |
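A sketch of add() appending to an existing absolute path; the expected strings again assume Path's usual string rendering:

p = Path('/a/b')
p.add('c/d')   # string interface; appended, not replaced
str(p)         # '/a/b/c/d'
p.add(['e'])   # list interface works too
str(p)         # '/a/b/c/d/e'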
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.set
|
(self, path)
|
return self
| 555 | 557 |
def set(self, path):
self.load(path)
return self
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L555-L557
| 45 |
[0, 1, 2] | 100 |
[] | 0 | true | 96.258065 | 3 | 1 | 100 | 0 |
def set(self, path):
self.load(path)
return self
| 28,158 |
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.remove
|
(self, path)
|
return self
| 559 | 570 |
def remove(self, path):
if path is True:
self.load('')
else:
if is_iterable_but_not_string(path): # List interface.
segments = path
else: # String interface.
segments = self._segments_from_path(path)
base = ([''] if self.isabsolute else []) + self.segments
self.load(remove_path_segments(base, segments))
return self
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L559-L570
| 45 |
[0, 1, 2, 4, 5, 7, 8, 9, 10, 11] | 83.333333 |
[] | 0 | false | 96.258065 | 12 | 3 | 100 | 0 |
def remove(self, path):
if path is True:
self.load('')
else:
if is_iterable_but_not_string(path): # List interface.
segments = path
else: # String interface.
segments = self._segments_from_path(path)
base = ([''] if self.isabsolute else []) + self.segments
self.load(remove_path_segments(base, segments))
return self
| 28,159 |
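A sketch of the two remove() modes, matching the tail-stripping semantics of remove_path_segments() above:

p = Path('/a/b/c')
p.remove('b/c')  # strips the matching tail, leaving a trailing '/'
str(p)           # '/a/'
p.remove(True)   # True clears the path entirely
str(p)           # ''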
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.normalize
|
(self)
|
return self
|
Normalize the path. Turn '//a/./b/../c//' into '/a/c/'.
Returns: <self>.
|
Normalize the path. Turn '//a/./b/../c//' into '/a/c/'.
| 572 | 584 |
def normalize(self):
"""
Normalize the path. Turn '//a/./b/../c//' into '/a/c/'.
Returns: <self>.
"""
if str(self):
normalized = normpath(str(self)) + ('/' * self.isdir)
if normalized.startswith('//'): # http://bugs.python.org/636648
normalized = '/' + normalized.lstrip('/')
self.load(normalized)
return self
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L572-L584
| 45 |
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12] | 100 |
[] | 0 | true | 96.258065 | 13 | 3 | 100 | 3 |
def normalize(self):
if str(self):
normalized = normpath(str(self)) + ('/' * self.isdir)
if normalized.startswith('//'): # http://bugs.python.org/636648
normalized = '/' + normalized.lstrip('/')
self.load(normalized)
return self
| 28,160 |
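The docstring example, traced as a sketch; normpath is assumed to be the POSIX variant, since backslash semantics would make no sense for URL paths:

p = Path('//a/./b/../c//')
p.normalize()
str(p)  # '/a/c/': '.' and '..' resolved, duplicate slashes collapsed,
        # trailing '/' preserved because the path is a directory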
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.asdict
|
(self)
|
return {
'encoded': str(self),
'isdir': self.isdir,
'isfile': self.isfile,
'segments': self.segments,
'isabsolute': self.isabsolute,
}
| 586 | 593 |
def asdict(self):
return {
'encoded': str(self),
'isdir': self.isdir,
'isfile': self.isfile,
'segments': self.segments,
'isabsolute': self.isabsolute,
}
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L586-L593
| 45 |
[0, 1] | 25 |
[] | 0 | false | 96.258065 | 8 | 1 | 100 | 0 |
def asdict(self):
return {
'encoded': str(self),
'isdir': self.isdir,
'isfile': self.isfile,
'segments': self.segments,
'isabsolute': self.isabsolute,
}
| 28,161 |
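A sketch of the dictionary asdict() returns for a simple directory path; isdir and isfile are assumed to reflect whether the last segment is empty:

Path('/a/b/').asdict()
# {'encoded': '/a/b/', 'isdir': True, 'isfile': False,
#  'segments': ['a', 'b', ''], 'isabsolute': True}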
||
gruns/furl
|
774846234ff803606fdd289a7549f9b50b2b3677
|
furl/furl.py
|
Path.isabsolute
|
(self)
|
return self._isabsolute
| 596 | 599 |
def isabsolute(self):
if self._force_absolute(self):
return True
return self._isabsolute
|
https://github.com/gruns/furl/blob/774846234ff803606fdd289a7549f9b50b2b3677/project45/furl/furl.py#L596-L599
| 45 |
[0, 1, 2, 3] | 100 |
[] | 0 | true | 96.258065 | 4 | 2 | 100 | 0 |
def isabsolute(self):
if self._force_absolute(self):
return True
return self._isabsolute
| 28,162 |
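A sketch showing that the force_absolute callback overrides the stored flag:

Path('/a').isabsolute  # True: the loaded path began with '/'
Path('a').isabsolute   # False
Path('a', force_absolute=lambda path: True).isabsolute  # True: callback wins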