query
stringlengths 9
60
| language
stringclasses 1
value | code
stringlengths 105
25.7k
| url
stringlengths 91
217
|
---|---|---|---|
get inner html
|
python
|
def innerHTML(self) -> str:
    """Return the ``innerHTML`` of the wrapped inner node.

    Delegates to ``self._inner_element`` when one is set; otherwise falls
    back to the superclass implementation.
    """
    inner = self._inner_element
    if inner:
        return inner.innerHTML
    return super().innerHTML
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/tag.py#L155-L159
|
get inner html
|
python
|
def html(self):
    """
    Return ``innerHTML`` of the whole page.

    The page has to contain a ``body`` tag; when it does not, ``None``
    is returned instead of raising.

    .. versionadded:: 2.2
    """
    try:
        body_elm = self.get_elm(tag_name='body')
    except selenium_exc.NoSuchElementException:
        return None
    return body_elm.get_attribute('innerHTML')
|
https://github.com/horejsek/python-webdriverwrapper/blob/a492f79ab60ed83d860dd817b6a0961500d7e3f5/webdriverwrapper/wrapper.py#L408-L419
|
get inner html
|
python
|
def innerHTML(self):
    '''
    innerHTML - Return an HTML string of the inner contents of this tag,
      including children.

    @return - String of inner contents HTML
    '''
    # Self-closing tags cannot contain anything.
    if self.isSelfClosing is True:
        return ''
    pieces = []
    for child in self.blocks:
        # Tags contribute their full outer HTML (start tag, contents,
        # end tag); text nodes are used verbatim.
        if isinstance(child, AdvancedTag):
            pieces.append(child.outerHTML)
        else:
            pieces.append(child)
    return ''.join(pieces)
|
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L1452-L1477
|
get inner html
|
python
|
def html(self) -> str:
    """Return the whole HTML serialization of this node.

    When an inner element is set, its html is wrapped in this node's
    start/end tags; otherwise the superclass serialization is used.
    """
    inner = self._inner_element
    if inner:
        return self.start_tag + inner.html + self.end_tag
    return super().html
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/tag.py#L148-L152
|
get inner html
|
python
|
def __get_html(self, body=None):
    """
    Build the full report HTML page, optionally injecting the given body markup.

    :param body: Body tag content.
    :type body: unicode
    :return: Html.
    :rtype: unicode
    """
    parts = ["<html>", "<head>"]
    # Inline every bundled javascript blob in its own <script> element.
    for javascript in (self.__jquery_javascript,
                       self.__crittercism_javascript,
                       self.__reporter_javascript):
        parts.append("<script type=\"text/javascript\">")
        parts.append(javascript)
        parts.append("</script>")
    parts.append("<style type=\"text/css\">")
    parts.append(self.__style)
    parts.append("</style>")
    parts.append("</head>")
    if body is None:
        # Default skeleton body with an empty report container.
        parts.extend(["<body>", "<div id=\"report\">", "</div>", "</body>"])
    else:
        parts.append(body)
    parts.append("</html>")
    return "\n".join(parts)
|
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/reporter.py#L435-L466
|
get inner html
|
python
|
def innerHTML(self, html: str) -> None:  # type: ignore
    """Set innerHTML both on this node and related browser node."""
    fragment = self._parse_html(html)
    # Mirror the change into the browser first, when connected.
    if self.connected:
        self._set_inner_html_web(fragment.html)
    self._empty()
    self._append_child(fragment)
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/web_node.py#L305-L311
|
get inner html
|
python
|
def getHTML(self):
    '''
    getHTML - Get the full HTML as contained within this tree.
        If parsed from a document, this will contain the original whitespacing.

    @returns - <str> of html
    @raises ValueError - if nothing has been parsed yet

    @see getFormattedHTML
    @see getMiniHTML
    '''
    root = self.getRoot()
    if root is None:
        raise ValueError('Did not parse anything. Use parseFile or parseStr')

    if self.doctype:
        doctypeStr = '<!%s>\n' % (self.doctype, )
    else:
        doctypeStr = ''

    # 6.6.0: If we have a real root tag, print the outerHTML. If we have a
    # fake root tag (for the multiple-root condition), print the innerHTML
    # (skipping the fake outer root tag). Otherwise, we would miss untagged
    # text between the multiple root nodes.
    # NOTE: the original fetched the root a second time via self.getRoot();
    # reuse the value obtained above instead of repeating the call.
    if root.tagName == INVISIBLE_ROOT_TAG:
        return doctypeStr + root.innerHTML
    else:
        return doctypeStr + root.outerHTML
|
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L768-L796
|
get inner html
|
python
|
def get_html(self):
    """Generates if need be and returns a simpler html document with text.

    The tree is built lazily on first access and cached afterwards; the
    original had a redundant if/else where both branches returned the
    same attribute.
    """
    if self.__htmltree is None:
        # First access: build and cache the tree.
        self.__make_tree()
    return self.__htmltree
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L252-L258
|
get inner html
|
python
|
def _get_html(self, url):
    """Fetch *url* with ``requests`` and return the raw response body.

    Logs the request (and cache hits when requests-cache is active) and
    delegates non-200 responses to ``throw_request_err``.
    """
    self.log.info(u"/GET {}".format(url))
    resp = requests.get(url)
    # requests-cache attaches ``from_cache`` to responses it served.
    if getattr(resp, 'from_cache', False):
        self.log.info("(from cache)")
    if resp.status_code != 200:
        throw_request_err(resp)
    return resp.content
|
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/scrapers/VantetiderScraper.py#L141-L153
|
get inner html
|
python
|
def get_text(element):
    '''Return the inner text of a pyquery element (links made explicit).'''
    _add_links_to_text(element)
    # Older pyquery versions do not accept the ``squash_space`` keyword.
    try:
        return element.text(squash_space=False)
    except TypeError:
        return element.text()
|
https://github.com/gleitz/howdoi/blob/94f0429b4e99cb914aadfca0f27257ea801471ac/howdoi/howdoi.py#L157-L163
|
get inner html
|
python
|
def get_body_content(self):
    """
    Returns content of BODY element for this HTML document. Content will be
    of type 'str' (Python 2) or 'bytes' (Python 3).

    :Returns:
      Returns content of this document, or '' when the document cannot
      be parsed or has no body.
    """
    try:
        html_tree = parse_html_string(self.content)
    except Exception:
        # Was a bare ``except:``; keep the best-effort contract but stop
        # swallowing SystemExit/KeyboardInterrupt as well.
        return ''

    html_root = html_tree.getroottree()

    if len(html_root.find('body')) != 0:
        body = html_tree.find('body')

        tree_str = etree.tostring(body, pretty_print=True, encoding='utf-8',
                                  xml_declaration=False)

        # Strip the enclosing <body>...</body> wrapper from the serialization.
        if tree_str.startswith(six.b('<body>')):
            n = tree_str.rindex(six.b('</body>'))
            return tree_str[6:n]
        return tree_str

    return ''
|
https://github.com/aerkalov/ebooklib/blob/305f2dd7f02923ffabf9586a5d16266113d00c4a/ebooklib/epub.py#L341-L370
|
get inner html
|
python
|
def inner_content(node):
    """
    Returns the inner content of a given XML node, including tags.

    Args:
        node (lxml.etree.Element): The node whose inner content is desired.

    Returns:
        str: The inner content of the node.
    """
    from lxml import etree

    # Include text content at the start of the node.
    parts = [node.text]
    for child in node:  # getchildren() is deprecated; iterate directly
        # Serialize the child to markup. ``encoding="unicode"`` makes
        # ``tostring`` return ``str``; the original used ``encoding="utf-8"``,
        # which returns ``bytes`` and breaks the ``str.join`` below on
        # Python 3. ``with_tail=False`` because the tail is appended
        # explicitly on the next line (lxml includes it by default, which
        # would duplicate it).
        parts.append(etree.tostring(child, encoding="unicode", with_tail=False))
        # Include any text following the child.
        parts.append(child.tail)
    # Discard any non-existent text parts and return.
    return "".join(filter(None, parts))
|
https://github.com/elliterate/capybara.py/blob/0c6ae449cc37e4445ec3cd6af95674533beedc6c/capybara/utils.py#L75-L99
|
get inner html
|
python
|
def get_html_content(self):
    """
    Parses the element and subelements and parses any HTML enabled text to
    its original HTML form for rendering.

    :returns: Parsed HTML enabled text content ('' when there is no
              ``xml_element``).
    :rtype: str
    """
    # Extract full element node content (including subelements).
    html_content = ''
    if hasattr(self, 'xml_element'):
        xml = self.xml_element
        content_list = ["" if xml.text is None else xml.text]

        def to_string(elem):
            # Comments have no tostring serialization here; use str().
            if isinstance(elem, _Comment):
                return str(elem)
            return ElementTree.tostring(elem).decode('utf-8')

        # ``getchildren()`` was removed in Python 3.9; iterating the
        # element yields the same direct children.
        content_list += [to_string(e) for e in list(xml)]
        full_xml_content = "".join(content_list)
        # Drop the ``html:`` prefixes and the xmlns:html declaration to
        # recover plain HTML markup.
        first_regex = r'html:'
        second_regex = r' xmlns:html=(["\'])(?:(?=(\\?))\2.)*?\1'
        html_content = re.sub(first_regex, '',
                              re.sub(second_regex, '', full_xml_content))
    return html_content
|
https://github.com/rodynnz/xccdf/blob/1b9dc2f06b5cce8db2a54c5f95a8f6bcf5cb6981/src/xccdf/models/html_element.py#L68-L99
|
get inner html
|
python
|
def get_html(self) -> str:
    """Return complete report as a HTML string.

    Walks the report document section by section, rendering a heading per
    section (with emoticons for every non-PASS status), then the checks
    grouped by check id, and finally prepends the report header/summary.
    """
    data = self.getdoc()
    num_checks = 0
    body_elements = []

    # Order by section first...
    for section in data["sections"]:
        section_name = html.escape(section["key"][0])
        # Only statuses other than PASS are surfaced in the heading.
        section_stati_of_note = (
            e for e in section["result"].elements() if e != "PASS"
        )
        # One emoticon per status, ordered by loglevel severity.
        section_stati = "".join(
            EMOTICON[s] for s in sorted(section_stati_of_note, key=LOGLEVELS.index)
        )
        body_elements.append(f"<h2>{section_name} {section_stati}</h2>")

        checks_by_id: Dict[str, List[Dict[str, str]]] = collections.defaultdict(
            list
        )
        # ...and check second.
        for cluster in section["checks"]:
            # A cluster may be a single check dict or a list of them.
            if not isinstance(cluster, list):
                cluster = [cluster]
            num_checks += len(cluster)
            for check in cluster:
                # check["key"][1] is the check id used for grouping.
                checks_by_id[check["key"][1]].append(check)
        for check, results in checks_by_id.items():
            check_name = html.escape(check)
            body_elements.append(f"<h3>{results[0]['description']}</h3>")
            body_elements.append(f"<div>Check ID: {check_name}</div>")
            for result in results:
                # Per-file results carry a "filename"; the rest are
                # family-wide checks.
                if "filename" in result:
                    body_elements.append(
                        html5_collapsible(
                            f"{EMOTICON[result['result']]} <strong>{result['filename']}</strong>",
                            self.html_for_check(result),
                        )
                    )
                else:
                    body_elements.append(
                        html5_collapsible(
                            f"{EMOTICON[result['result']]} <strong>Family check</strong>",
                            self.html_for_check(result),
                        )
                    )

    body_top = [
        "<h1>Fontbakery Technical Report</h1>",
        "<div>If you think a check is flawed or have an idea for a check, please "
        f" file an issue at <a href='{ISSUE_URL}'>{ISSUE_URL}</a> and remember "
        "to include a pointer to the repo and branch you're checking.</div>",
    ]
    if num_checks:
        # Aggregate counts per loglevel for the summary table.
        results_summary = [data["result"][k] for k in LOGLEVELS]
        body_top.append(summary_table(*results_summary, num_checks))
    omitted = [l for l in LOGLEVELS if self.omit_loglevel(l)]
    if omitted:
        body_top.append(
            "<p><strong>Note:</strong>"
            " The following loglevels were omitted in this report:"
            f" {', '.join(omitted)}</p>"
        )
    # Prepend header/summary before the per-section content.
    body_elements[0:0] = body_top
    return html5_document(body_elements)
|
https://github.com/googlefonts/fontbakery/blob/b355aea2e619a4477769e060d24c32448aa65399/Lib/fontbakery/reporters/html.py#L29-L96
|
get inner html
|
python
|
def get_html(self,
             url,
             params=None,
             cache_cb=None,
             **kwargs):
    """
    Get html of an url, preferring the cache when possible.

    On a cache miss the (lazily created) selenium driver fetches the
    page, and the fresh html is written back to the cache when the
    cache policy allows it.
    """
    url = add_params(url, params)
    cache_consumed, cached_value = self.try_read_cache(url)
    if cache_consumed:
        page_html = cached_value
    else:
        self._create_driver()
        self.driver.get(url)
        page_html = self.driver.page_source
    if self.should_we_update_cache(page_html, cache_cb, cache_consumed):
        expire = kwargs.get("cache_expire", self.cache_expire)
        self.cache.set(url, page_html, expire=expire)
    return page_html
|
https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/downloader/selenium_downloader.py#L101-L123
|
get inner html
|
python
|
def get_elements(html_file, tags):
    """
    Extract all the elements we're interested in.

    Returns a list of tuples with the attribute as first item
    and the list of elements as the second item.
    """
    with open(html_file) as fp:
        document = BeautifulSoup(fp, 'html.parser')

    def matcher(tag_name, attr_name):
        # Build a predicate that rejects external links for this tag/attr.
        def check(node):
            value = node.get(attr_name, 'http')
            return node.name == tag_name and not value.startswith(('http', '//'))
        return check

    return [(attr, document.find_all(matcher(tag, attr)))
            for tag, attr in tags]
|
https://github.com/danidee10/Staticfy/blob/ebc555b00377394b0f714e4a173d37833fec90cb/staticfy/staticfy.py#L71-L89
|
get inner html
|
python
|
def get_header(self, elem, style, node):
    """Return the HTML header tag name ('h1'..'h6') for this element.

    :Returns:
      String representation of HTML tag.
    """
    font_size = style
    # Elements explicitly flagged as possible headers win outright.
    if getattr(elem, 'possible_header', False):
        return 'h1'
    if not style:
        return 'h6'
    # Styles with a style_id are resolved to an actual font size first.
    if hasattr(style, 'style_id'):
        font_size = _get_font_size(self.doc, style)
    try:
        headers_style = self.doc.possible_headers_style
        if font_size in headers_style:
            return 'h{}'.format(headers_style.index(font_size) + 1)
        return 'h{}'.format(self.doc.possible_headers.index(font_size) + 1)
    except ValueError:
        # Unknown font size: demote to the smallest header.
        return 'h6'
|
https://github.com/booktype/python-ooxml/blob/b56990a5bee2e1bc46839cec5161ff3726dc4d87/ooxml/serialize.py#L1005-L1029
|
get inner html
|
python
|
def extract_html_urls(self, html):
    """
    Take all ``<img src="..">`` from the HTML, plus ``srcset`` values on
    ``img``/``source`` elements and ``<a href>`` targets.
    """
    parser = HTMLParser(tree=treebuilders.getTreeBuilder("dom"))
    dom = parser.parse(html)
    found = []

    def collect_srcset(node):
        # srcset values expand into multiple candidate URLs.
        srcset = node.getAttribute('srcset')
        if srcset:
            found.extend(self.extract_srcset(srcset))

    for img in dom.getElementsByTagName('img'):
        src = img.getAttribute('src')
        if src:
            found.append(unquote_utf8(src))
        collect_srcset(img)
    for source in dom.getElementsByTagName('source'):
        collect_srcset(source)
    for anchor in dom.getElementsByTagName('a'):
        href = anchor.getAttribute('href')
        if href:
            found.append(unquote_utf8(href))
    return found
|
https://github.com/django-fluent/django-fluent-contents/blob/896f14add58471b98d7aa295b2c9e6abedec9003/fluent_contents/management/commands/find_contentitem_urls.py#L119-L146
|
get inner html
|
python
|
def get_html(self,
             url,
             params=None,
             cache_cb=None,
             decoder_encoding=None,
             decoder_errors=url_specified_decoder.ErrorsHandle.strict,
             **kwargs):
    """
    Get html of an url.

    The response body is decoded via ``url_specified_decoder``, using the
    explicit *decoder_encoding* when given, otherwise an encoding inferred
    from the final response url.
    """
    response = self.get(url=url, params=params, cache_cb=cache_cb, **kwargs)
    binary_body = response.content
    final_url = response.url
    return url_specified_decoder.decode(
        binary=binary_body,
        url=final_url,
        encoding=decoder_encoding,
        errors=decoder_errors,
    )
|
https://github.com/MacHu-GWU/crawlib-project/blob/241516f2a7a0a32c692f7af35a1f44064e8ce1ab/crawlib/downloader/requests_downloader.py#L140-L161
|
get inner html
|
python
|
def html(self) -> _BaseHTML:
    """Unicode representation of the HTML content
    (`learn more <http://www.diveintopython3.net/strings.html>`_).
    """
    if not self._html:
        # No raw HTML captured: serialize the parsed element tree instead.
        return etree.tostring(self.element, encoding='unicode').strip()
    return self.raw_html.decode(self.encoding, errors='replace')
|
https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L100-L107
|
get inner html
|
python
|
def get_element_with_text(self, locator, text, params=None, timeout=None, visible=False):
    """
    Get element that contains <text> either by text or by attribute value.

    Note: if timeout is 0, this function will not wait for the element(s) to become present.

    :param locator: locator tuple or list of WebElements
    :param text: text that the element should contain
    :param params: (optional) locator parameters
    :param timeout: (optional) time to wait for text (default: self._explicit_wait)
    :param visible: (optional) if the element should also be visible (default: False)
    :return: WebElement instance
    :raises NoSuchElementException: if no matching element appears within the timeout
    """
    if timeout is None:
        timeout = self._explicit_wait

    # Polling helper: @wait retries on ElementNotVisibleException until timeout.
    @wait(exceptions=ElementNotVisibleException, timeout=timeout)
    def _wait_for_text():
        return self.is_element_with_text_present(locator, text, params, visible)

    # Build the failure message up front; locator may be a (type, value)
    # tuple or a plain list of WebElements.
    msg = "Element with type <{}>, locator <{}> and text <{text}> was never located!".format(
        *locator, text=text) if not isinstance(locator, list) else \
        "None of the elements had the text: {}".format(text)

    if timeout == 0:
        # Zero timeout: a single immediate check, no polling.
        return self.is_element_with_text_present(locator, text, params, visible)

    try:
        return _wait_for_text()
    except RuntimeError as e:
        # The wait helper signals exhaustion with RuntimeError; translate
        # it into the selenium-style NoSuchElementException for callers.
        LOGGER.debug(e)
        raise NoSuchElementException(msg)
|
https://github.com/Projectplace/basepage/blob/735476877eb100db0981590a6d12140e68652167/basepage/base_page.py#L367-L398
|
get inner html
|
python
|
def get_inner_text(text, entities):
    """
    Gets the inner text that's surrounded by the given entities.
    For instance: text = 'hey!', entity = MessageEntityBold(2, 2) -> 'y!'.

    :param text: the original text.
    :param entities: the entity or entities that must be matched.
    :return: a single result or a list of the text surrounded by the entities.
    """
    # Work in surrogate space so offsets match Telegram's UTF-16 lengths.
    text = add_surrogate(text)
    return [del_surrogate(text[entity.offset:entity.offset + entity.length])
            for entity in entities]
|
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/utils.py#L742-L758
|
get inner html
|
python
|
def _get_element_text_or_none(document, selector):
    """
    Using a CSS selector, get the element and return the text, or None if no element.

    :arg document: ``HTMLElement`` document
    :arg selector: CSS selector
    :returns: str or None
    """
    matches = document.cssselect(selector)
    if not matches:
        return None
    # Only the first match is consulted.
    return matches[0].text
|
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/utils/diaspora.py#L111-L122
|
get inner html
|
python
|
def get_element(self, tag_name, attribute, **attribute_filter):
    """
    Return element in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string
    :rtype: string
    """
    for key in self.xml:
        document = self.xml[key]
        if document is None:
            continue
        tags = document.getElementsByTagName(tag_name)
        if tags is None:
            # Mirrors the original: a missing tag list aborts the search.
            return None
        for item in tags:
            # Reject the item as soon as one filter attribute mismatches.
            mismatch = any(
                item.getAttributeNS(NS_ANDROID_URI, attr) != val
                for attr, val in attribute_filter.items()
            )
            if mismatch:
                continue
            value = item.getAttributeNS(NS_ANDROID_URI, attribute)
            if len(value) > 0:
                return value
    return None
|
https://github.com/zeaphoo/reston/blob/96502487b2259572df55237c9526f92627465088/reston/core/apk.py#L502-L534
|
get inner html
|
python
|
def html(self):
    """
    Render this test suite as an html fragment.

    Builds the package line, per-class sections, error rows, captured
    stdout/stderr rows and a properties table, then interpolates them
    into the suite template.

    :return: html string for this suite
    """
    classes = list()
    package = ""
    if self.package is not None:
        package = "Package: " + self.package + "<br/>"

    # Render each contained test class to its own html section.
    for classname in self.classes:
        classes.append(self.classes[classname].html())

    errs = ""
    for error in self.errors:
        # Emit the "Errors" header row only once, before the first error.
        if not len(errs):
            errs += "<tr><th colspan='2' align='left'>Errors</th></tr>"
        for part in ["type", "message", "text"]:
            if part in error:
                errs += "<tr><td>{}</td><td><pre>{}</pre></td></tr>".format(
                    part,
                    tag.text(error[part]))

    stdio = ""
    if self.stderr or self.stdout:
        stdio += "<tr><th colspan='2' align='left'>Output</th></tr>"
        if self.stderr:
            stdio += "<tr><td>Stderr</td><td><pre>{}</pre></td></tr>".format(
                tag.text(self.stderr))
        if self.stdout:
            stdio += "<tr><td>Stdout</td><td><pre>{}</pre></td></tr>".format(
                tag.text(self.stdout))

    props = ""
    if len(self.properties):
        props += "<table>"
        propnames = sorted(self.properties)
        for prop in propnames:
            props += "<tr><th>{}</th><td>{}</td></tr>".format(prop, self.properties[prop])
        props += "</table>"

    # Suite template; names passed through tag.text() for escaping.
    return """
<div class="testsuite">
<h2>Test Suite: {name}</h2><a name="{anchor}">
{package}
{properties}
<table>
<tr><th align="left">Duration</th><td align="right">{duration} sec</td></tr>
<tr><th align="left">Test Cases</th><td align="right">{count}</td></tr>
<tr><th align="left">Failures</th><td align="right">{fails}</td></tr>
{errs}
{stdio}
</table>
<a name="toc"></a>
<h2>Results Index</h2>
{toc}
<hr size="2"/>
<h2>Test Results</h2>
<div class="testclasses">
{classes}
</div>
</div>
""".format(name=tag.text(self.name),
           anchor=self.anchor(),
           duration=self.duration,
           errs=errs,
           stdio=stdio,
           toc=self.toc(),
           package=package,
           properties=props,
           classes="".join(classes),
           count=len(self.all()),
           fails=len(self.failed()))
|
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L323-L396
|
get inner html
|
python
|
def get_text(self, locator):
    """Get element text (for hybrid and mobile browser use `xpath` locator, others might cause problem)

    Example:

    | ${text} | Get Text | //*[contains(@text,'foo')] |

    New in AppiumLibrary 1.4.
    """
    found = self._get_text(locator)
    self._info("Element '%s' text is '%s' " % (locator, found))
    return found
|
https://github.com/serhatbolsu/robotframework-appiumlibrary/blob/91c808cf0602af6be8135ac529fa488fded04a85/AppiumLibrary/keywords/_element.py#L402-L413
|
get inner html
|
python
|
def getElement(self, namespaceURI, localName):
    '''
    Look up a child element and wrap it in an ElementProxy.

    Keyword arguments:
    namespaceURI -- namespace of element
    localName -- local name of element
    '''
    found = self._dom.getElement(self.node, localName, namespaceURI, default=None)
    if not found:
        return None
    return ElementProxy(self.sw, found)
|
https://github.com/rameshg87/pyremotevbox/blob/123dffff27da57c8faa3ac1dd4c68b1cf4558b1a/pyremotevbox/ZSI/wstools/Utility.py#L1069-L1078
|
get inner html
|
python
|
def get_index_html (urls):
    """
    Construct artificial index.html from given URLs.

    @param urls: URL strings
    @type urls: iterator of string
    """
    def anchor(entry):
        # Escape for display; quoting can fail on some unicode entries.
        name = cgi.escape(entry)
        try:
            href = cgi.escape(urllib.quote(entry))
        except KeyError:
            # Some unicode entries raise KeyError.
            href = name
        return '<a href="%s">%s</a>' % (href, name)

    lines = ["<html>", "<body>"]
    lines.extend(anchor(entry) for entry in urls)
    lines.extend(["</body>", "</html>"])
    return os.linesep.join(lines)
|
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/__init__.py#L157-L174
|
get inner html
|
python
|
def getElementById(self, id: str) -> Optional[Node]:
    """Get element by ``id``.

    If this document does not have the element with the id, return None.
    """
    candidate = getElementById(id)
    # Only hand back elements actually owned by this document.
    if candidate and candidate.ownerDocument is self:
        return candidate
    return None
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/document.py#L185-L193
|
get inner html
|
python
|
def get(self) -> None:
    """Handle GET: respond with the whole HTML of the root document."""
    from wdom.document import get_document
    logger.info('connected')
    document = get_document()
    self.write(document.build())
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/server/_tornado.py#L35-L39
|
get inner html
|
python
|
def getMiniHTML(self):
    '''
    getMiniHTML - Gets the HTML representation of this document without any
        pretty formatting, disregarding original whitespace beyond the
        functional.

    @return <str> - HTML with only functional whitespace present
    '''
    from .Formatter import AdvancedHTMLMiniFormatter
    formatter = AdvancedHTMLMiniFormatter(None)  # Do not double-encode
    formatter.feed(self.getHTML())
    return formatter.getHTML()
|
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L826-L837
|
get inner html
|
python
|
def html(self) -> str:
    """Return string representation of this attribute.

    Used in the start tag of the HTML representation of the Element node.
    """
    # Boolean attributes render as the bare attribute name.
    if self._owner and self.name in self._owner._special_attr_boolean:
        return self.name
    value = self.value
    if isinstance(value, str):
        value = html_.escape(value)
    return '{name}="{value}"'.format(name=self.name, value=value)
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/element.py#L175-L186
|
get inner html
|
python
|
def __get_isbn(self, html):
    """
    Extract the ISBN from the book borrowing status page.

    :param html: page source to scan
    :return: the ISBN string, or '' when not found
    """
    import re
    # The ISBN is the second argument of the getBookCover(...) JS call.
    matches = re.findall(r'getBookCover\(".*","(.*)"\);', html)
    return matches[0] if matches else ''
|
https://github.com/Jayin/ETipsService/blob/1a42612a5e5d11bec0ec1a26c99dec6fe216fca4/service/wyulibrary.py#L111-L125
|
get inner html
|
python
|
def html(self) -> str:
    """Return html-escaped string representation of this node."""
    parent = self.parentNode
    # Parents that escape their text (normal elements) get escaped data;
    # others (e.g. raw-text containers) receive it verbatim.
    if parent and parent._should_escape_text:
        return html.escape(self.data)
    return self.data
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/node.py#L635-L639
|
get inner html
|
python
|
def _get_elements(self, source):
    """
    Returns the list of HtmlElements for the source

    :param source: The source list to parse
    :type source: list
    :returns: A list of HtmlElements
    :rtype: list
    """
    found = []
    # Concatenate the matches of every xpath expression, in order.
    for xpath in source:
        found.extend(self.tree.xpath(xpath))
    return found
|
https://github.com/jurismarches/chopper/blob/53c5489a53e3a5d205a5cb207df751c09633e7ce/chopper/html/extractor.py#L106-L115
|
get inner html
|
python
|
def get(self, index):
    """
    Get the element by index. If index is out of bounds for
    the internal list, None is returned. Indexes cannot be
    negative.

    :param int index: retrieve element by positive index in list
    :rtype: SubElement or None
    """
    # Reject negative indexes explicitly: the documented contract forbids
    # them, but the original bounds check let them fall through to
    # Python's negative indexing and silently return elements from the end.
    if self and 0 <= index <= len(self) - 1:
        return self._result_cache[index]
    return None
|
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/base/collection.py#L116-L126
|
get inner html
|
python
|
def extract(self, html_contents, css_contents=None, base_url=None):
    """
    Extracts the cleaned html tree as a string and only
    css rules matching the cleaned html tree

    :param html_contents: The HTML contents to parse
    :type html_contents: str
    :param css_contents: The CSS contents to parse
    :type css_contents: str
    :param base_url: The base page URL to use for relative to absolute links
    :type base_url: str
    :returns: cleaned HTML contents, cleaned CSS contents
    :rtype: str or tuple
    """
    # --- HTML pass ---------------------------------------------------
    html_extractor = self.html_extractor(
        html_contents, self._xpaths_to_keep, self._xpaths_to_discard)
    cleaned_html = None
    if html_extractor.parse():
        if base_url is not None:
            # Rewrite relative links as absolute ones first.
            html_extractor.rel_to_abs(base_url)
        cleaned_html = html_extractor.to_string()

    # --- CSS pass ----------------------------------------------------
    if css_contents is None:
        # No CSS supplied: keep the historical single-value return.
        return cleaned_html
    if cleaned_html is None:
        cleaned_css = None
    else:
        css_extractor = self.css_extractor(css_contents, cleaned_html)
        css_extractor.parse()
        if base_url is not None:
            css_extractor.rel_to_abs(base_url)
        cleaned_css = css_extractor.to_string()
    return (cleaned_html, cleaned_css)
|
https://github.com/jurismarches/chopper/blob/53c5489a53e3a5d205a5cb207df751c09633e7ce/chopper/extractor.py#L58-L110
|
get inner html
|
python
|
def get_html(self):
    """ Downloads HTML content of page given the page_url

    Returns:
        tuple: ``(page_html, page_url)`` — the page source and the final
        (possibly redirected) URL; both are also cached on ``self``.

    Raises:
        PageLoadError: on non-200 responses or connection failures.
    """
    if self.use_ghost:
        # Headless path: render the page with PhantomJS via selenium.
        self.url = urljoin("http://", self.url)
        import selenium
        import selenium.webdriver
        driver = selenium.webdriver.PhantomJS(
            service_log_path=os.path.devnull)
        driver.get(self.url)
        page_html = driver.page_source
        page_url = driver.current_url
        driver.quit()
    else:
        if self.proxy_url:
            print("Using proxy: " + self.proxy_url + "\n")
        try:
            page = requests.get(self.url, proxies=self.proxies)
            if page.status_code != 200:
                raise PageLoadError(page.status_code)
        except requests.exceptions.MissingSchema:
            # Bare domain given: retry once with an explicit scheme.
            self.url = "http://" + self.url
            page = requests.get(self.url, proxies=self.proxies)
            if page.status_code != 200:
                raise PageLoadError(page.status_code)
        except requests.exceptions.ConnectionError:
            raise PageLoadError(None)
        try:
            page_html = page.text
            page_url = page.url
        except UnboundLocalError:
            # ``page`` never got assigned — treat as a load failure.
            raise PageLoadError(None)
    self.page_html = page_html
    self.page_url = page_url
    return (self.page_html, self.page_url)
|
https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L101-L136
|
get inner html
|
python
|
def get_i_text(node):
    """
    Get the text for an Indicator node.

    :param node: Indicator node.
    :return: the node's boolean ``operator`` attribute, upper-cased.
    :raises IOCParseError: if the node is not an Indicator.
    """
    if node.tag != 'Indicator':
        raise IOCParseError('Invalid tag: {}'.format(node.tag))
    return node.get('operator').upper()
|
https://github.com/mandiant/ioc_writer/blob/712247f3a10bdc2584fa18ac909fc763f71df21a/ioc_writer/ioc_api.py#L694-L704
|
get inner html
|
python
|
def get_text_for_html(html_content):
    '''
    Take the HTML content (from, for example, an email)
    and construct a simple plain text version of that content
    (for example, for inclusion in a multipart email message).
    '''
    soup = BeautifulSoup(html_content)
    # Drop script/style elements entirely; their text is not content.
    for removable in soup(["script", "style"]):
        removable.extract()
    # Replace anchors with "link text <href>".
    for anchor in soup.findAll('a', href=True):
        anchor.replaceWith('%s <%s>' % (anchor.string, anchor.get('href')))
    raw_text = soup.get_text()
    # Normalize whitespace: strip each line, split headline runs into
    # separate lines, then drop blanks.
    stripped_lines = (line.strip() for line in raw_text.splitlines())
    chunks = (phrase.strip() for line in stripped_lines for phrase in line.split(" "))
    return '\n'.join(chunk for chunk in chunks if chunk)
|
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/core/utils/emails.py#L4-L31
|
get inner html
|
python
|
def get_absolute_url_link(self, text=None, cls=None, icon_class=None,
                          **attrs):
    """Gets the html link for the object."""
    # Fall back to the object's default link text when none is given.
    link_text = self.get_link_text() if text is None else text
    return build_link(href=self.get_absolute_url(),
                      text=link_text,
                      cls=cls,
                      icon_class=icon_class,
                      **attrs)
|
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/mixins/urls.py#L26-L36
|
get inner html
|
python
|
def getElementById(self, _id):
    '''
    getElementById - Gets an element within this collection by id

    @param _id - string of "id" attribute
    @return - a single tag matching the id, or None if none found
    '''
    for tag in self:
        if tag.id == _id:
            return tag
        # Recurse into each child's subtree.
        for child in tag.children:
            match = child.getElementById(_id)
            if match is not None:
                return match
    return None
|
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L2340-L2355
|
get inner html
|
python
|
def to_html(self):
    """Render as html

    Args:
        None

    Returns:
        Str the html representation

    Raises:
        Errors are propagated
    """
    icon = self.html_icon()
    attributes = self.html_attributes()
    # Deal with long file names that prevent wrapping
    wrappable_text = self.to_text().replace(os.sep, '<wbr>' + os.sep)
    # The original compared with ``is not ''`` — identity comparison with a
    # literal, which is implementation-dependent (and a SyntaxWarning on
    # modern CPython); compare by value instead.
    if icon != '' and attributes != '':
        return '<span%s>%s%s</span>' % (attributes, icon, wrappable_text)
    else:
        return self.to_text()
|
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/messaging/item/text.py#L151-L170
|
get inner html
|
python
|
def extract(self, check_url=None, http_equiv_refresh=True):
    """
    Downloads HTML <head> tag first, extracts data from it using
    specific head techniques, loads it and checks if is complete.
    Otherwise downloads the HTML <body> tag as well and loads data
    extracted by using appropriate semantic techniques.

    Eagerly calls check_url(url) if any, before parsing the HTML.
    Provided function should raise an exception to break extraction.
    E.g.: URL has been summarized before; URL points to off limits
    websites like foursquare.com, facebook.com, bitly.com and so on.

    :param check_url: optional callable invoked with url= before parsing
    :param http_equiv_refresh: follow a <meta http-equiv=refresh> once
        (the recursive call passes False to stop further redirects)
    """
    # assert self._is_clear()
    logger = logging.getLogger(__name__)
    logger.info("Extract: %s", self.clean_url)
    # Streamed request so head/body can be read incrementally.
    with closing(request.get(self.clean_url, stream=True)) as response:
        response.raise_for_status()
        mime = response.headers.get('content-type')
        if mime and not ('html' in mime.lower()):
            raise HTMLParseError('Invalid Content-Type: %s' % mime)
        # The response URL may differ from the requested one (redirects).
        self.clean_url = self._clean_url(response.url)
        if self.clean_url is None:
            raise URLError('Bad url: %s' % response.url)
        if check_url is not None:
            check_url(url=self.clean_url)
        encoding = config.ENCODING or response.encoding
        self._html = ""
        # Some sites need JS rendering; fetch those via PhantomJS instead.
        if config.PHANTOMJS_BIN and \
                site(self.clean_url) in config.PHANTOMJS_SITES:
            self._html = request.phantomjs_get(self.clean_url)
            response.consumed = True
        head = self._get_tag(response, tag_name="head", encoding=encoding)
        if http_equiv_refresh:
            # Check meta http-equiv refresh tag
            html = head or decode(self._html, encoding)
            self._extract(html, self.clean_url, [
                "summary.techniques.HTTPEquivRefreshTags",
            ])
            new_url = self.urls and self.urls[0]
            if new_url and new_url != self.clean_url:
                # Follow the refresh target once, then disable refresh
                # handling to avoid redirect loops.
                logger.warning("Refresh: %s", new_url)
                self._clear()
                self.clean_url = new_url
                return self.extract(check_url=check_url, http_equiv_refresh=False)
        if head:
            logger.debug("Got head: %s", len(head))
            self._extract(head, self.clean_url, [
                "extraction.techniques.FacebookOpengraphTags",
                "extraction.techniques.TwitterSummaryCardTags",
                "extraction.techniques.HeadTags"
            ])
        else:
            logger.debug("No head: %s", self.clean_url)
        # Only fetch/parse the body when head data was not sufficient
        # (or when configured to always collect everything).
        if config.GET_ALL_DATA or not self._is_complete():
            body = self._get_tag(response, tag_name="body", encoding=encoding)
            if body:
                logger.debug("Got body: %s", len(body))
                self._extract(body, self.clean_url, [
                    "extraction.techniques.HTML5SemanticTags",
                    "extraction.techniques.SemanticTags"
                ])
            else:
                logger.debug("No body: %s", self.clean_url)
            if not head and not body:
                raise HTMLParseError('No head nor body tags found.')
    del self._html
|
https://github.com/svven/summary/blob/3a6c43d2da08a7452f6b76d1813aa70ba36b8a54/summary/__init__.py#L227-L300
|
get inner html
|
python
|
def _get_html_contents(html):
    """Classify an HTML block as code, math, or regular HTML.

    Returns ('code'|'math', stripped_text) for code/math blocks,
    and ('', '') for a regular HTML block.
    """
    p = MyHTMLParser()
    p.feed(html)
    if p.is_code:
        kind = 'code'
    elif p.is_math:
        kind = 'math'
    else:
        return '', ''
    return (kind, p.data.strip())
|
https://github.com/rossant/ipymd/blob/d87c9ebc59d67fe78b0139ee00e0e5307682e303/ipymd/formats/atlas.py#L47-L57
|
get inner html
|
python
|
def _get_element_text(self, element):
"""
Return the textual content of the element and its children
"""
text = ''
if element.text is not None:
text += element.text
for child in element.getchildren():
text += self._get_element_text(child)
if element.tail is not None:
text += element.tail
return text
|
https://github.com/jbarlow83/OCRmyPDF/blob/79c84eefa353632a3d7ccddbd398c6678c1c1777/src/ocrmypdf/hocrtransform.py#L103-L114
|
get inner html
|
python
|
def get_internal_urls(self):
    """
    URL's, which may point to edeposit, aleph, kramerius and so on.

    Collected from fields ``856u40``, ``998a`` and ``URLu``.

    Returns:
        list: List of internal URLs with XML-escaped ampersands unescaped.
    """
    internal_urls = self.get_subfields("856", "u", i1="4", i2="0")
    internal_urls.extend(self.get_subfields("998", "a"))
    internal_urls.extend(self.get_subfields("URL", "u"))

    # The previous `x.replace("&", "&")` was a no-op; the intent is to
    # unescape XML-encoded ampersands.  A list (not a lazy `map`) is
    # returned so the result matches the documented return type on
    # Python 3 as well.
    return [url.replace("&amp;", "&") for url in internal_urls]
|
https://github.com/edeposit/marcxml_parser/blob/6d1c77c61fc2827b71f1b3d5aa3332d7f5807820/src/marcxml_parser/query.py#L539-L552
|
get inner html
|
python
|
def getElementById(id: str) -> Optional[Node]:
    """Return the element registered under ``id``, or None if absent."""
    return Element._elements_with_id.get(id)
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/document.py#L31-L34
|
get inner html
|
python
|
def get_element_by_id(self, id, *default):
    """
    Return the first element in the document with the given id.

    If none is found, return the default argument if provided or raise
    KeyError otherwise.  Documents in the wild can repeat an id; like
    browsers, only the first match is returned.
    """
    matches = _id_xpath(self, id=id)
    if matches:
        # FIXME: should this check for multiple matches?
        # browsers just return the first one
        return matches[0]
    if default:
        return default[0]
    raise KeyError(id)
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L256-L275
|
get inner html
|
python
|
def raw_html(self) -> _RawHTML:
    """Bytes representation of the HTML content.

    (`learn more <http://www.diveintopython3.net/strings.html>`_).
    """
    if not self._html:
        # No cached raw bytes: serialize the parsed element instead.
        return etree.tostring(self.element, encoding='unicode').strip().encode(self.encoding)
    return self._html
|
https://github.com/kennethreitz/requests-html/blob/b59a9f2fb9333d7d467154a0fd82978efdb9d23b/requests_html.py#L90-L97
|
get inner html
|
python
|
def get_elements(self, tag_name, attribute):
    """
    Return the values of ``attribute`` for every ``tag_name`` element
    found in any of the parsed xml documents.

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    values = []
    for xml_name in self.xml:
        for elem in self.xml[xml_name].getElementsByTagName(tag_name):
            raw = elem.getAttributeNS(NS_ANDROID_URI, attribute)
            values.append(str(self.format_value(raw)))
    return values
|
https://github.com/zeaphoo/reston/blob/96502487b2259572df55237c9526f92627465088/reston/core/apk.py#L474-L488
|
get inner html
|
python
|
def get_html_string(self):
    """Return a simpler html string with the extracted text, building
    the tree first when it does not exist yet."""
    if self.__htmltree is None:
        self.__make_tree()
    return htmltostring(self.__htmltree)
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L260-L267
|
get inner html
|
python
|
def get_body_text(self):
    """ Parse the body html and returns the body text using bs4

    :return: body text
    :rtype: str
    """
    if self.body_type != 'HTML':
        # Plain-text body: nothing to parse.
        return self.body
    try:
        soup = bs(self.body, 'html.parser')
    except RuntimeError:
        # Parsing failed; fall back to the raw body.
        return self.body
    return soup.body.text
|
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/calendar.py#L1429-L1443
|
get inner html
|
python
|
def get_html(url):
    """Gets the HTML for the given URL using a GET request.

    Requests are throttled so that at least THROTTLE_DELAY seconds pass
    between consecutive requests, across threads and processes.

    :url: the absolute URL of the desired page.
    :returns: a string of HTML.
    """
    global last_request_time
    with throttle_process_lock:
        with throttle_thread_lock:
            # sleep until THROTTLE_DELAY secs have passed since last request
            elapsed = time.time() - last_request_time.value
            if elapsed < THROTTLE_DELAY:
                time.sleep(THROTTLE_DELAY - elapsed)
            # make request
            response = requests.get(url)
            # update last request time for throttling
            last_request_time.value = time.time()
    # raise ValueError on 4xx status code, get rid of comments, and return
    if 400 <= response.status_code < 500:
        raise ValueError(
            'Status Code {} received fetching URL "{}"'
            .format(response.status_code, url)
        )
    return response.text.replace('<!--', '').replace('-->', '')
|
https://github.com/mdgoldberg/sportsref/blob/09f11ac856a23c96d666d1d510bb35d6f050b5c3/sportsref/utils.py#L26-L55
|
get inner html
|
python
|
def get_inner_edges(self):
    """ Returns a list of the internal edges of the tree. """
    inner_edges = []
    for edge in self._tree.preorder_edge_iter():
        if edge.is_internal() and edge.head_node and edge.tail_node:
            inner_edges.append(edge)
    return inner_edges
|
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/tree.py#L897-L901
|
get inner html
|
python
|
def get_urls(htmlDoc, limit=200):
    '''takes in html document as string, returns links to dots'''
    soup = BeautifulSoup(htmlDoc)
    urls = {}
    count = 0
    for anchor in soup.findAll('a'):
        href = anchor.get('href')
        if 'dots' in href and count < limit:
            # key: first word of the link text, with '/' made filesystem-safe
            key = anchor.text.split(' ')[0].replace('/', '_')
            urls[key] = href.split('/')[2]
            count += 1
    return urls
|
https://github.com/gnullByte/dotcolors/blob/4b09ff9862b88b3125fe9cd86aa054694ed3e46e/dotcolors/getdots.py#L42-L59
|
get inner html
|
python
|
def getElementById(self, _id, root='root'):
    '''
        getElementById - Searches and returns the first (should only be one) element with the given ID.

            @param id <str> - A string of the id attribute.

            @param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root' [default], the root of the parsed tree will be used.
    '''
    (root, isFromRoot) = self._handleRootArg(root)

    if isFromRoot is True and root.id == _id:
        return root

    for child in root.children:
        if child.getAttribute('id') == _id:
            return child
        # Depth-first: recurse into this child's subtree.
        found = self.getElementById(_id, child)
        if found is not None:
            return found

    return None
|
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L334-L356
|
get inner html
|
python
|
def to_html_string(self):
    """
    Returns an HTML document (as a string) describing the process. This
    is only supported if the editor provided an SVG representation.
    """
    html = ET.Element('html')
    head = ET.SubElement(html, 'head')
    title = ET.SubElement(head, 'title')
    title.text = self.description
    body = ET.SubElement(html, 'body')
    h1 = ET.SubElement(body, 'h1')
    h1.text = self.description
    span = ET.SubElement(body, 'span')
    span.text = '___CONTENT___'
    # BUGFIX: serialize to text (not bytes) so the str.replace() below
    # works on Python 3 as well; encoding='unicode' returns a str.
    html_text = ET.tostring(html, encoding='unicode')
    svg_content = ''
    svg_done = set()
    # Embed each distinct SVG only once, in depth-first spec order.
    for spec in self.get_specs_depth_first():
        if spec.svg and spec.svg not in svg_done:
            svg_content += '<p>' + spec.svg + "</p>"
            svg_done.add(spec.svg)
    return html_text.replace('___CONTENT___', svg_content)
|
https://github.com/knipknap/SpiffWorkflow/blob/f0af7f59a332e0619e4f3c00a7d4a3d230760e00/SpiffWorkflow/bpmn/specs/BpmnProcessSpec.py#L141-L164
|
get inner html
|
python
|
def getElementByWdomId(id: str) -> Optional[WebEventTarget]:
    """Get element with ``wdom_id``."""
    if not id:
        return None
    if id == 'document':
        return get_document()
    if id == 'window':
        return get_document().defaultView
    return WdomElement._elements_with_wdom_id.get(id)
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/document.py#L37-L46
|
get inner html
|
python
|
def html(self, unicode=False):
    """ Return HTML of element """
    markup = lxml.html.tostring(self.element, encoding=self.encoding)
    return markup.decode(self.encoding) if unicode else markup
|
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L36-L41
|
get inner html
|
python
|
def _get_element_attr_or_none(document, selector, attribute):
"""
Using a CSS selector, get the element and return the given attribute value, or None if no element.
Args:
document (HTMLElement) - HTMLElement document
selector (str) - CSS selector
attribute (str) - The attribute to get from the element
"""
element = document.cssselect(selector)
if element:
return element[0].get(attribute)
return None
|
https://github.com/jaywink/federation/blob/59d31bb37e662891dbea72c1dee05dc53146c78b/federation/utils/diaspora.py#L125-L137
|
get inner html
|
python
|
def get_encoded_text(container, xpath):
    """Return text for element at xpath in the container xml if it is there.

    Parameters
    ----------
    container : xml.etree.ElementTree.Element
        The element to be searched in.

    xpath : str
        The path to be looked for.

    Returns
    -------
    result : str
    """
    try:
        # find() returns None on a miss, in which case .itertext()
        # raises AttributeError and we fall through to None.
        found = container.find(xpath, ns)
        return "".join(found.itertext())
    except AttributeError:
        return None
|
https://github.com/scopus-api/scopus/blob/27ce02dd3095bfdab9d3e8475543d7c17767d1ab/scopus/utils/get_encoded_text.py#L15-L33
|
get inner html
|
python
|
def _get_page_elements(self):
    """Return page elements and page objects of this page object

    :returns: list of page elements and page objects
    """
    candidates = list(self.__dict__.items()) + list(self.__class__.__dict__.items())
    # Keep every CommonObject attribute except the back-reference 'parent'.
    return [value for attribute, value in candidates
            if attribute != 'parent' and isinstance(value, CommonObject)]
|
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/pageobjects/page_object.py#L60-L69
|
get inner html
|
python
|
def get_text(html_content, display_images=False, deduplicate_captions=False, display_links=False):
    '''
    ::param: html_content
    ::returns:
        a text representation of the html content.
    '''
    html_content = html_content.strip()
    if not html_content:
        return ""
    if html_content.startswith('<?xml '):
        # strip XML declaration, if necessary
        html_content = RE_STRIP_XML_DECLARATION.sub('', html_content, count=1)
    tree = fromstring(html_content)
    parser = Inscriptis(tree,
                        display_images=display_images,
                        deduplicate_captions=deduplicate_captions,
                        display_links=display_links)
    return parser.get_text()
|
https://github.com/weblyzard/inscriptis/blob/0d04f81e69d643bb5f470f33b4ca67b62fc1037c/src/inscriptis/__init__.py#L18-L34
|
get inner html
|
python
|
def get_anchor_contents(markup):
    """
    Given HTML markup, return a list of href inner html for each anchor tag.
    """
    soup = BeautifulSoup(markup, 'lxml')
    anchors = soup.find_all('a')
    return ['%s' % anchor.contents[0] for anchor in anchors]
|
https://github.com/alexhayes/django-toolkit/blob/b64106392fad596defc915b8235fe6e1d0013b5b/django_toolkit/markup/html.py#L10-L15
|
get inner html
|
python
|
def getElementById(self, _id, root='root', useIndex=True):
    '''
        getElementById - Searches and returns the first (should only be one) element with the given ID.

            @param id <str> - A string of the id attribute.

            @param root <AdvancedTag/'root'> - Search starting at a specific node, if provided. if string 'root', the root of the parsed tree will be used.

            @param useIndex <bool> If useIndex is True and ids are indexed [see constructor] only the index will be used. Otherwise a full search is performed.
    '''
    (root, isFromRoot) = self._handleRootArg(root)

    # NOTE(review): the `useIndex` parameter is never read below; the
    # instance setting `self.useIndex` is consulted instead — confirm
    # whether the parameter was meant to override it per-call.
    if self.useIndex is True and self.indexIDs is True:
        # Fast path: constant-time lookup in the prebuilt id index.
        element = self._idMap.get(_id, None)
        # When searching below a subtree (not the document root), the
        # indexed hit must actually live inside that subtree.
        if isFromRoot is False and element is not None:
            if self._hasTagInParentLine(element, root) is False:
                element = None
        return element

    # Fall back to the base class's full recursive search.
    return AdvancedHTMLParser.getElementById(self, _id, root)
|
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Parser.py#L1229-L1251
|
get inner html
|
python
|
def get_internal_header(request: HttpRequest) -> str:
    """
    Return request's 'X_POLYAXON_INTERNAL:' header, as a bytestring.
    """
    header_service = conf.get('HEADERS_INTERNAL')
    return get_header(request=request, header_service=header_service)
|
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/scopes/authentication/internal.py#L49-L53
|
get inner html
|
python
|
def get_elements(self, tag_name, attribute, with_namespace=True):
    """
    Deprecated: use `get_all_attribute_value()` instead

    Yield the value of ``attribute`` for every ``tag_name`` element
    found in the parsed xml files.

    :param tag_name: a string which specify the tag name
    :param attribute: a string which specify the attribute
    """
    for xml_name in self.xml:
        tree = self.xml[xml_name]
        if tree is None:
            continue
        for item in tree.findall('.//' + tag_name):
            attr_key = self._ns(attribute) if with_namespace else attribute
            value = item.get(attr_key)
            # There might be an attribute without the namespace
            if value:
                yield self._format_value(value)
|
https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L614-L631
|
get inner html
|
python
|
def get_html_link(self, obj):
    """Returns an html formatted link for the given object
    """
    url = api.get_url(obj)
    oid = api.get_id(obj)
    return "<a href='{}'>{}</a>".format(url, oid)
|
https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/browser/workflow/analysisrequest.py#L256-L259
|
get inner html
|
python
|
def get_element(self, tag_name, attribute, **attribute_filter):
    """
    :Deprecated: use `get_attribute_value()` instead

    Return element in xml files which match with the tag name and the specific attribute

    :param tag_name: specify the tag name
    :type tag_name: string
    :param attribute: specify the attribute
    :type attribute: string

    :rtype: string
    """
    for i in self.xml:
        if self.xml[i] is None:
            continue
        tag = self.xml[i].findall('.//' + tag_name)
        # NOTE(review): returning (instead of `continue`) on the first
        # file lacking the tag means later files are never searched —
        # confirm this short-circuit is intended.
        if len(tag) == 0:
            return None
        for item in tag:
            # Reject the element unless every keyword filter matches
            # its (namespaced) attribute value.
            skip_this_item = False
            for attr, val in list(attribute_filter.items()):
                attr_val = item.get(self._ns(attr))
                if attr_val != val:
                    skip_this_item = True
                    break

            if skip_this_item:
                continue

            # First element passing all filters wins.
            value = item.get(self._ns(attribute))
            if value is not None:
                return value
    return None
|
https://github.com/appknox/pyaxmlparser/blob/8ffe43e81a534f42c3620f3c1e3c6c0181a066bd/pyaxmlparser/core.py#L652-L683
|
get inner html
|
python
|
def _get_header(self, header):
"""
Gets the html header
"""
if header is None:
html = self.header()
else:
html = header
return html
|
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/report.py#L162-L170
|
get inner html
|
python
|
def _get_element_by_id(self, resource_name, eclass, id):
"""Get a single element matching an id"""
elements = self._get_elements(resource_name, eclass, id=id)
if not elements:
raise ValueError("No resource matching: {0}".format(id))
if len(elements) == 1:
return elements[0]
raise ValueError("Multiple resources matching: {0}".format(id))
|
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest.py#L154-L161
|
get inner html
|
python
|
def get_text(self, locator, params=None, timeout=None, visible=True):
    """
    Get text or value from element based on locator with optional parameters.

    :param locator: element identifier
    :param params: (optional) locator parameters
    :param timeout: (optional) time to wait for text (default: None)
    :param visible: should element be visible before getting text (default: True)
    :return: element text, value or empty string
    """
    element = locator
    # A WebElement may be passed directly; anything else is treated as
    # a locator and resolved to an element first.
    if not isinstance(element, WebElement):
        element = self.get_present_element(locator, params, timeout, visible)

    if element and element.text:
        return element.text
    else:
        # Fall back to the 'value' attribute (e.g. for <input>); return
        # "" when the element offers no get_attribute at all.
        try:
            return element.get_attribute('value')
        except AttributeError:
            return ""
|
get inner html
|
python
|
def wrap_inner(node, tag):
    """Wrap the given tag around the contents of a node."""
    wrapper = node.ownerDocument.createElement(tag)
    # Snapshot the child list first: appendChild() reparents nodes and
    # would otherwise mutate the sequence while we iterate it.
    for child in list(node.childNodes):
        wrapper.appendChild(child)
    node.appendChild(wrapper)
|
https://github.com/christian-oudard/htmltreediff/blob/0e28f56492ae7e69bb0f74f9a79a8909a5ad588d/htmltreediff/util.py#L358-L364
|
get inner html
|
python
|
def get_elements(self, element, xmlns=None):
    """
    Return a list of elements those match the searching condition.

    If the XML input has namespaces, the path ``/element1/element2`` must
    be expanded to ``/{namespace}element1/{namespace}element2``, so the
    namespace URI is inserted in front of every path segment after the
    first whose name starts with a letter or underscore.

    Parameters:
        element: Searching condition (XPath subset) as accepted by
                 ``xml.etree.ElementTree.Element.findall``.
        xmlns:   XML namespace, default value to None.  None means the
                 default ``self.xmlns`` is used; only a string (including
                 "") is treated as an explicit namespace.

    Returns:
        (list): List of elements those match the searching condition
    """
    if xmlns is not None:
        ns_prefix = "{" + xmlns + "}"
    elif self.xmlns:
        ns_prefix = "{" + self.xmlns + "}"
    else:
        ns_prefix = ""

    path = ""
    while "/" in element:
        head, element = element.split("/", 1)
        path += head + "/"
        if element[0].isalpha() or element[0] == "_":
            path += ns_prefix
    path += element
    return self.dom.findall(path)
|
https://github.com/RedHatInsights/insights-core/blob/b57cbf8ed7c089672426ede0441e0a4f789ef4a1/insights/core/__init__.py#L638-L680
|
get inner html
|
python
|
def get_element_with_id(self, id):
    """Return the element with the specified ID, or None if absent."""
    # Should we maintain a hashmap of ids to make this more efficient? Probably overkill.
    # TODO: Elements can contain nested elements (captions, footnotes, table cells, etc.)
    for el in self.elements:
        if el.id == id:
            return el
    return None
|
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/doc/document.py#L300-L304
|
get inner html
|
python
|
def get_html(url,
             headers=None,
             timeout=None,
             errors="strict",
             wait_time=None,
             driver=None,
             zillow_only=False,
             cache_only=False,
             zillow_first=False,
             cache_first=False,
             random=False,
             **kwargs):
    """
    Use Google Cached Url.

    Builds an ordered list of candidate urls (real zillow site and/or
    google-cache mirrors) according to the ``xxx_only`` / ``xxx_first`` /
    ``random`` flags, then returns the html of the first one that can be
    fetched.

    :param cache_only: if True, then real zillow site will never be used.
    :param driver: selenium browser driver。
    :raises ValueError: when more than one ``xxx_only`` or ``xxx_first``
        flag is True.
    :raises Exception: the error from the last attempted url when every
        attempt fails.
    """
    if wait_time is None:
        wait_time = Config.Crawler.wait_time

    # prepare url
    cache_url1 = prefix + url + "/"
    cache_url2 = prefix + url
    zillow_url = url

    only_flags = [zillow_only, cache_only]
    if sum(only_flags) == 0:
        first_flags = [zillow_first, cache_first]
        if sum(first_flags) == 0:
            if random:
                # Randomly pick which source is attempted first.
                if randint(0, 1):
                    all_url = [zillow_url, cache_url1, cache_url2]
                else:
                    all_url = [cache_url1, cache_url2, zillow_url]
            else:
                all_url = [zillow_url, cache_url1, cache_url2]
        elif sum(first_flags) == 1:
            if zillow_first:
                all_url = [zillow_url, cache_url1, cache_url2]
            elif cache_first:
                all_url = [cache_url1, cache_url2, zillow_url]
        else:
            raise ValueError(
                "Only zero or one `xxx_first` argument could be `True`!")
    elif sum(only_flags) == 1:
        if zillow_only:
            all_url = [zillow_url, ]
        elif cache_only:
            all_url = [cache_url1, cache_url2]
    else:
        raise ValueError(
            "Only zero or one `xxx_only` argument could be `True`!")

    # BUGFIX: on Python 3 the `except ... as e` target is unbound when the
    # handler exits, so the old `raise e` after the loop raised NameError
    # instead of the real failure.  Keep the last error explicitly.
    last_error = None
    for url in all_url:
        try:
            return _get_html(url, headers, timeout, errors,
                             wait_time, driver, **kwargs)
        except Exception as e:
            last_error = e
    raise last_error
|
https://github.com/MacHu-GWU/crawl_zillow-project/blob/c6d7ca8e4c80e7e7e963496433ef73df1413c16e/crawl_zillow/spider.py#L85-L148
|
get inner html
|
python
|
def _value__get(self):
    """
    Get/set the value (which is the contents of this element)
    """
    # XHTML-namespaced elements serialize as XML, everything else as HTML.
    if self.tag.startswith("{%s}" % XHTML_NAMESPACE):
        serialisation_method = 'xml'
    else:
        serialisation_method = 'html'
    content = self.text or ''
    # Append the serialization of any child elements to the text content.
    for el in self:
        content += etree.tostring(
            el, method=serialisation_method, encoding='unicode')
    return content
|
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/lxml/html/__init__.py#L1100-L1113
|
get inner html
|
python
|
def _get_content(self, url):
    "Get HTML content"
    # The DB expects the unquoted path appended to the base url.
    target_url = self._db_url + '/' + unquote(url)  # .encode('utf-8'))
    log.debug("Opening '{0}'".format(target_url))
    try:
        f = self.opener.open(target_url)
    except HTTPError as e:
        # Most likely an expired session: offer to renew it interactively
        # and retry the same request with the fresh session.
        log.error("HTTP error, your session may be expired.")
        log.error(e)
        if input("Request new permanent session and retry? (y/n)") in 'yY':
            self.request_permanent_session()
            return self._get_content(url)
        else:
            return None
    log.debug("Accessing '{0}'".format(target_url))
    try:
        content = f.read()
    except IncompleteRead as icread:
        # Keep whatever was received; the payload may be truncated.
        log.critical(
            "Incomplete data received from the DB, " +
            "the data could be corrupted."
        )
        content = icread.partial
    log.debug("Got {0} bytes of data.".format(len(content)))
    return content.decode('utf-8')
|
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/db.py#L369-L393
|
get inner html
|
python
|
def getElementByWdomId(self, id: Union[str]) -> Optional[WebEventTarget]:
    """Get an element node with ``wdom_id``.

    If this document does not have the element with the id, return None.
    """
    # Delegate to the module-level lookup, then check document ownership.
    elm = getElementByWdomId(id)
    if not elm:
        return None
    return elm if elm.ownerDocument is self else None
|
https://github.com/miyakogi/wdom/blob/a21bcd23e94baceee71161829f6897bee3fd39c1/wdom/document.py#L297-L305
|
get inner html
|
python
|
def _get_element(name, element_type, server=None, with_properties=True):
    '''
    Get an element with or without properties
    '''
    name = quote(name, safe='')
    data = _api_get('{0}/{1}'.format(element_type, name), server)

    # Format data, get properties if asked, and return the whole thing
    entity = data['extraProperties']['entity']
    if any(entity):
        element = {key: value for key, value in entity.items()}
        if with_properties:
            element['properties'] = _get_element_properties(name, element_type)
        return element
    return None
|
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/glassfish.py#L186-L201
|
get inner html
|
python
|
def get_text(self, xml, name):
    """
    Gets the element's text value from the XML object provided.

    Joins the data of text and CDATA child nodes of the first
    ``wp:comment_<name>`` element.
    """
    accepted_types = [Node.CDATA_SECTION_NODE, Node.TEXT_NODE]
    nodes = xml.getElementsByTagName("wp:comment_" + name)[0].childNodes
    return "".join(n.data for n in nodes if n.nodeType in accepted_types)
|
https://github.com/minhhoit/yacms/blob/2921b706b7107c6e8c5f2bbf790ff11f85a2167f/yacms/blog/management/commands/import_wordpress.py#L28-L34
|
get inner html
|
python
|
def extract_text(html,
                 guess_punct_space=True,
                 guess_layout=True,
                 newline_tags=NEWLINE_TAGS,
                 double_newline_tags=DOUBLE_NEWLINE_TAGS):
    """
    Convert html to text, cleaning invisible content such as styles.

    ``html`` should be a unicode string or an already parsed lxml.html
    element; ``None`` yields ``''``.  This is a convenience wrapper that
    cleans the tree and delegates to ``html_text.etree_to_text``.

    When ``guess_punct_space`` is True (default), no extra whitespace is
    added for punctuation (a heuristic with a small performance cost).
    When ``guess_layout`` is True (default), newlines are inserted around
    ``newline_tags`` and double newlines around ``double_newline_tags``
    so the output resembles browser rendering.  Defaults live in
    `html_text.NEWLINE_TAGS` and `html_text.DOUBLE_NEWLINE_TAGS`.
    """
    if html is None:
        return ''
    return etree_to_text(
        _cleaned_html_tree(html),
        guess_punct_space=guess_punct_space,
        guess_layout=guess_layout,
        newline_tags=newline_tags,
        double_newline_tags=double_newline_tags,
    )
|
https://github.com/TeamHG-Memex/html-text/blob/871d4dbe9f4f99e5f041110c60458adcaae6fab4/html_text/html_text.py#L180-L219
|
get inner html
|
python
|
def html(self):
    """
    Build HTML documentation.
    """
    ret_code = self._sphinx_build('html')
    # Remove any stale zip bundle left over from a previous build.
    zip_fname = os.path.join(BUILD_PATH, 'html', 'pandas.zip')
    if os.path.exists(zip_fname):
        os.remove(zip_fname)

    if self.single_doc_html is None:
        self._add_redirects()
    else:
        self._open_browser(self.single_doc_html)
    return ret_code
|
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/make.py#L214-L227
|
get inner html
|
python
|
def get_html_text_editor(
        name,
        id=None,
        content='',
        textual_content=None,
        width='300px',
        height='200px',
        enabled=True,
        file_upload_url=None,
        toolbar_set="Basic",
        custom_configurations_path='/js/ckeditor/invenio-ckeditor-config.js',
        ln=None):
    """
    Returns a wysiwyg editor (CKEditor) to embed in html pages.

    Fall back to a simple textarea when the library is not installed,
    or when the user's browser is not compatible with the editor, or
    when 'enable' is False, or when javascript is not enabled.

    NOTE that the output also contains a hidden field named
    'editor_type' that contains the kind of editor used, 'textarea' or
    'ckeditor'.

    Based on 'editor_type' you might want to take different actions,
    like replace CRLF with <br/> when editor_type equals to
    'textarea', but not when editor_type equals to 'ckeditor'.

    @param name: *str* the name attribute of the returned editor

    @param id: *str* the id attribute of the returned editor (when
        applicable)

    @param content: *str* the default content of the editor.

    @param textual_content: *str* a content formatted for the case where the
        wysiwyg editor is not available for user. When not
        specified, use value of 'content'

    @param width: *str* width of the editor in an html compatible unit:
        Eg: '400px', '50%'.

    @param height: *str* height of the editor in an html compatible unit:
        Eg: '400px', '50%'.

    @param enabled: *bool* if the wysiwyg editor is return (True) or if a
        simple texteara is returned (False)

    @param file_upload_url: *str* the URL used to upload new files via the
        editor upload panel. You have to implement the
        handler for your own use. The URL handler will get
        form variables 'File' as POST for the uploaded file,
        and 'Type' as GET for the type of file ('file',
        'image', 'flash', 'media')
        When value is not given, the file upload is disabled.

    @param toolbar_set: *str* the name of the toolbar layout to
        use. CKeditor comes by default with 'Basic' and
        'Default'. To define other sets, customize the
        config file in
        /opt/cds-invenio/var/www/ckeditor/invenio-ckconfig.js

    @param custom_configurations_path: *str* value for the CKeditor config
        variable 'CustomConfigurationsPath',
        which allows to specify the path of a
        file that contains a custom configuration
        for the editor. The path is relative to
        /opt/invenio/var/www/

    @return: the HTML markup of the editor
    """
    ln = default_ln(ln)
    if textual_content is None:
        textual_content = content

    editor = ''

    # Use the rich CKEditor when it is both enabled and installed;
    # otherwise fall back to a plain <textarea> below.
    if enabled and ckeditor_available:
        # Prepare upload path settings
        file_upload_script = ''
        if file_upload_url is not None:
            file_upload_script = ''',
            filebrowserLinkUploadUrl: '%(file_upload_url)s',
            filebrowserImageUploadUrl: '%(file_upload_url)s?type=Image',
            filebrowserFlashUploadUrl: '%(file_upload_url)s?type=Flash'
            ''' % {'file_upload_url': file_upload_url}

        # Prepare code to instantiate an editor
        editor += '''
        <script type="text/javascript" language="javascript">//<![CDATA[
        /* Load the script only once, or else multiple instance of the editor on the same page will not work */
        var INVENIO_CKEDITOR_ALREADY_LOADED
            if (INVENIO_CKEDITOR_ALREADY_LOADED != 1) {
                document.write('<script type="text/javascript" src="%(CFG_SITE_URL)s/vendors/ckeditor/ckeditor.js"><\/script>');
                INVENIO_CKEDITOR_ALREADY_LOADED = 1;
            }
        //]]></script>
        <input type="hidden" name="editor_type" id="%(id)seditortype" value="textarea" />
        <textarea rows="100" cols="80" id="%(id)s" name="%(name)s" style="width:%(width)s;height:%(height)s">%(textual_content)s</textarea>
        <textarea rows="100" cols="80" id="%(id)shtmlvalue" name="%(name)shtmlvalue" style="display:none;width:%(width)s;height:%(height)s">%(html_content)s</textarea>
        <script type="text/javascript">//<![CDATA[
          var CKEDITOR_BASEPATH = '/ckeditor/';

          CKEDITOR.replace( '%(name)s',
                            {customConfig: '%(custom_configurations_path)s',
                            toolbar: '%(toolbar)s',
                            width: '%(width)s',
                            height:'%(height)s',
                            language: '%(ln)s'
                            %(file_upload_script)s
                            });

        CKEDITOR.on('instanceReady',
          function( evt )
          {
            /* If CKeditor was correctly loaded, display the nice HTML representation */
            var oEditor = evt.editor;
            editor_id = oEditor.id
            editor_name = oEditor.name
            var html_editor = document.getElementById(editor_name + 'htmlvalue');
            oEditor.setData(html_editor.value);
            var editor_type_field = document.getElementById(editor_name + 'editortype');
            editor_type_field.value = 'ckeditor';
            var writer = oEditor.dataProcessor.writer;
            writer.indentationChars = ''; /*Do not indent source code with tabs*/
            oEditor.resetDirty();
            /* Workaround: http://dev.ckeditor.com/ticket/3674 */
            evt.editor.on( 'contentDom', function( ev )
            {
                ev.removeListener();
                evt.editor.resetDirty();
            } );
            /* End workaround */
          })

        //]]></script>
        ''' % \
            {'textual_content': cgi.escape(textual_content),
             'html_content': content,
             'width': width,
             'height': height,
             'name': name,
             'id': id or name,
             'custom_configurations_path': custom_configurations_path,
             'toolbar': toolbar_set,
             'file_upload_script': file_upload_script,
             'CFG_SITE_URL': cfg['CFG_SITE_URL'],
             'ln': ln}
    else:
        # CKedior is not installed
        textarea = '<textarea rows="100" cols="80" %(id)s name="%(name)s" style="width:%(width)s;height:%(height)s">%(content)s</textarea>' \
                   % {'content': cgi.escape(textual_content),
                      'width': width,
                      'height': height,
                      'name': name,
                      'id': id and ('id="%s"' % id) or ''}
        editor += textarea
        editor += '<input type="hidden" name="editor_type" value="textarea" />'
    return editor
|
https://github.com/inveniosoftware-attic/invenio-utils/blob/9a1c6db4e3f1370901f329f510480dd8df188296/invenio_utils/html.py#L498-L658
|
get inner html
|
python
|
def _get_crawled_urls(self, handle, request):
    """
    Main method where the crawler html content is parsed with
    beautiful soup and out of the DOM, we get the urls
    """
    try:
        # Decode defensively: undecodable bytes are replaced rather
        # than aborting the crawl of this page.
        content = six.text_type(handle.open(request).read(), "utf-8",
                                errors="replace")
        soup = BeautifulSoup(content, "html.parser")
        tags = soup('a')
        for tag in tqdm(tags):
            href = tag.get("href")
            if href is not None:
                # Resolve relative links against the page url and
                # collect each url only once.
                url = urllib.parse.urljoin(self.url, escape(href))
                if url not in self:
                    self.urls.append(url)
    except urllib.request.HTTPError as error:
        if error.code == 404:
            logger.warning("ERROR: %s -> %s for %s" % (error, error.url, self.url))
        else:
            logger.warning("ERROR: %s for %s" % (error, self.url))
    except urllib.request.URLError as error:
        logger.warning("ERROR: %s for %s" % (error, self.url))
        # NOTE(review): this raises a *new* URLError with a fixed
        # message instead of re-raising `error` — confirm intended.
        raise urllib.request.URLError("URL entered is Incorrect")
https://github.com/vinitkumar/pycrawler/blob/d3fe6d2da1469fc701c4fe04df88cee9cc8cd9c3/linkfetcher.py#L48-L73
|
get inner html
|
python
|
def get_content(self):
    """
    Returns content for cover page as HTML string. Content will be of type 'str' (Python 2) or 'bytes' (Python 3).

    :Returns:
      Returns content of this document.
    """
    # Render the cover template, then patch the single <img> element so
    # it points at this book's cover image before serializing back out.
    self.content = self.book.get_template('cover')
    document = parse_string(super(EpubCoverHtml, self).get_content())
    xhtml_ns = {'xhtml': NAMESPACES['XHTML']}
    cover_img = document.getroot().xpath('//xhtml:img', namespaces=xhtml_ns)[0]
    cover_img.set('src', self.image_name)
    cover_img.set('alt', self.title)
    return etree.tostring(document, pretty_print=True, encoding='utf-8',
                          xml_declaration=True)
|
https://github.com/aerkalov/ebooklib/blob/305f2dd7f02923ffabf9586a5d16266113d00c4a/ebooklib/epub.py#L466-L486
|
get inner html
|
python
|
def getElementById(self, id):
    '''
    DOM API: Returns the single element whose id matches, or None when
    nothing matches. Raises ValueError when the id is not unique.
    '''
    matches = self.get(id=id)
    if len(matches) > 1:
        raise ValueError('Multiple tags with id "%s".' % id)
    return matches[0] if matches else None
|
https://github.com/Knio/dominate/blob/1eb88f9fd797658eef83568a548e2ef9b546807d/dominate/dom1core.py#L40-L50
|
get inner html
|
python
|
def _get_elements(self, url, key, eclass, id=None, name=None):
    """Return child resources under `url` filtered by `id` or `name`.

    Args:
        url(str): url of children.
        key(str): key in the returned JSON.
        eclass(subclass type of :py:class:`_ResourceElement`): element class to create instances of.
        id(str, optional): only return resources whose `id` property matches the given `id`
        name(str, optional): only return resources whose `name` property matches the given `name`

    Returns:
        list(_ResourceElement): List of `eclass` instances

    Raises:
        ValueError: both `id` and `name` are specified together
    """
    if id is not None and name is not None:
        raise ValueError("id and name cannot specified together")
    matched = []
    for element in self.rest_client.make_request(url)[key]:
        if _exact_resource(element, id) and _matching_resource(element, name):
            matched.append(eclass(element, self.rest_client))
    return matched
|
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L113-L134
|
get inner html
|
python
|
def get_web_element(self, element):
    """Return the web element from a page element or its locator

    :param element: either a WebElement, PageElement or element locator as a tuple (locator_type, locator_value)
    :returns: WebElement object (or None for unsupported input types)
    """
    from toolium.pageelements.page_element import PageElement
    if isinstance(element, WebElement):
        return element
    if isinstance(element, PageElement):
        return element.web_element
    if isinstance(element, tuple):
        # Locator tuple: resolve it through the wrapped driver.
        return self.driver_wrapper.driver.find_element(*element)
    return None
|
https://github.com/Telefonica/toolium/blob/56847c243b3a98876df74c184b75e43f8810e475/toolium/utils.py#L638-L653
|
get inner html
|
python
|
def get_html(self, card_id):
    """
    图文消息群发卡券

    Fetch the mass-message (mpnews) HTML snippet for the given card and
    return only the 'content' field of the API response.
    """
    return self._post(
        'card/mpnews/gethtml',
        data={'card_id': card_id},
        result_processor=lambda x: x['content'],
    )
|
https://github.com/jxtech/wechatpy/blob/4df0da795618c0895a10f1c2cde9e9d5c0a93aaa/wechatpy/client/api/card.py#L86-L97
|
get inner html
|
python
|
def __get_img(self):
    """
    Returns an image object corresponding to the page
    """
    # Force-load the pixel data while the file handle is still open;
    # PIL otherwise reads lazily and would fail after the `with` exits.
    with self.fs.open(self.__img_path, 'rb') as stream:
        image = PIL.Image.open(stream)
        image.load()
    return image
|
https://github.com/openpaperwork/paperwork-backend/blob/114b831e94e039e68b339751fd18250877abad76/paperwork_backend/img/page.py#L129-L136
|
get inner html
|
python
|
def getHTMLPopup(self, oid):
    """
    The htmlPopup resource provides details about the HTML pop-up
    authored by the user using ArcGIS for Desktop.

    Input:
       oid - object id of the feature whose HTML pop-up is requested
    Output:
       service response for the htmlPopup resource, or "" when the
       layer has no HTML popup configured
    """
    # Guard clause: nothing to fetch when no popup type is authored.
    if self.htmlPopupType == "esriServerHTMLPopupTypeNone":
        return ""
    popup_url = self._url + "/%s/htmlPopup" % oid
    return self._get(url=popup_url, param_dict={'f': "json"},
                     securityHandler=self._securityHandler,
                     proxy_port=self._proxy_port,
                     proxy_url=self._proxy_url)
|
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/agol/services.py#L1949-L1968
|
get inner html
|
python
|
def getElementById(self, _id):
    '''
    getElementById - Depth-first search of this tag's descendants for
    a tag whose "id" attribute equals the given value.

    @param _id - String of id

    @return - AdvancedTag or None
    '''
    for node in self.children:
        # A direct child match wins before its subtree is searched.
        if node.getAttribute('id') == _id:
            return node
        descendant = node.getElementById(_id)
        if descendant is not None:
            return descendant
    return None
|
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L1781-L1795
|
get inner html
|
python
|
def tag(tagname, content='', attrs=None):
    """ Helper for programmatically building HTML tags.

    Note that this barely does any escaping, and will happily spit out
    dangerous user input if used as such.

    :param tagname:
        Tag name of the DOM element we want to return.

    :param content:
        Optional content of the DOM element. If `None`, then the element is
        self-closed. By default, the content is an empty string. Supports
        iterables like generators.

    :param attrs:
        Optional dictionary-like collection of attributes for the DOM element.

    Example::

        >>> tag('div', content='Hello, world.')
        u'<div>Hello, world.</div>'

        >>> tag('script', attrs={'src': '/static/js/core.js'})
        u'<script src="/static/js/core.js"></script>'

        >>> tag('script', attrs=[('src', '/static/js/core.js'), ('type', 'text/javascript')])
        u'<script src="/static/js/core.js" type="text/javascript"></script>'

        >>> tag('meta', content=None, attrs=dict(content='"quotedquotes"'))
        u'<meta content="\\\\"quotedquotes\\\\"" />'

        >>> tag('ul', (tag('li', str(i)) for i in xrange(3)))
        u'<ul><li>0</li><li>1</li><li>2</li></ul>'
    """
    open_tag = tagname
    attr_text = ' '.join(_generate_dom_attrs(attrs)) if attrs else None
    if attr_text:
        open_tag += ' ' + attr_text
    if content is None:
        # Self-closing element: no body, no closing tag.
        return literal('<%s />' % open_tag)
    body = ''.join(iterate(content, unless=(basestring, literal)))
    return literal('<%s>%s</%s>' % (open_tag, body, tagname))
|
https://github.com/shazow/unstdlib.py/blob/e0632fe165cfbfdb5a7e4bc7b412c9d6f2ebad83/unstdlib/html.py#L107-L146
|
get inner html
|
python
|
def _get_element_by_id(self, url, key, eclass, id):
"""Get a single element matching an `id`
Args:
url(str): url of children.
key(str): key in the returned JSON.
eclass(subclass type of :py:class:`_ResourceElement`): element class to create instances of.
id(str): return resources whose `id` property matches the given `id`
Returns:
_ResourceElement: Element of type `eclass` matching the given `id`
Raises:
ValueError: No resource matches given `id` or multiple resources matching given `id`
"""
elements = self._get_elements(url, key, eclass, id=id)
if not elements:
raise ValueError("No resource matching: {0}".format(id))
if len(elements) == 1:
return elements[0]
raise ValueError("Multiple resources matching: {0}".format(id))
|
https://github.com/IBMStreams/pypi.streamsx/blob/abd67b4757120f6f805787fba390f53e9df9cdd8/streamsx/rest_primitives.py#L136-L156
|
get inner html
|
python
|
def get_text_from_html(html):
    """Returns a plaintext representation of HTML content."""
    try:
        soup = bs4.BeautifulSoup(html, "html.parser")
    except:  # pylint: disable=bare-except
        # Some docs don't parse
        return ""
    # Strip non-visible markup before extracting the text nodes.
    for element in soup(["script", "style"]):
        element.decompose()
    return "\n".join(s for s in _soup_strings(soup))
|
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/wikisum/html.py#L21-L32
|
get inner html
|
python
|
def get_attr_text(self):
    """Render this element's attributes as an HTML attribute string."""
    pairs = ['{}="{}"'.format(name, value) for name, value in self.attr.items()]
    return ' '.join(pairs)
|
https://github.com/krukas/Trionyx/blob/edac132cc0797190153f2e60bc7e88cb50e80da6/trionyx/layout.py#L358-L363
|
get inner html
|
python
|
def _get_page_content(self, response):
    """Given a :class:`requests.Response`, return the
    :class:`xml.etree.Element` of the content `div`.

    :param response: a :class:`requests.Response` to parse
    :returns: the :class:`Element` of the first content `div` or `None`
    """
    document = html5lib.parse(
        response.content,
        encoding=response.encoding,
        treebuilder='etree',
        namespaceHTMLElements=False
    )
    # etree's limited XPath cannot match one class token inside a
    # space-separated attribute value, so filter the candidates manually.
    for div in document.findall(".//body//div[@class]"):
        if "content" in div.attrib['class'].split(' '):
            # An Element with no children is falsy, so callers must
            # compare against None rather than rely on truthiness.
            return div
    return None
|
https://github.com/kennydo/nyaalib/blob/ab787b7ba141ed53d2ad978bf13eb7b8bcdd4b0d/nyaalib/__init__.py#L38-L65
|
get inner html
|
python
|
def end_element(self, tag):
    """
    Print HTML end element.

    @param tag: tag name
    @type tag: string
    @return: None
    """
    # Encode to the handler's output encoding, dropping unmappable
    # characters, then emit the closing tag to the output stream.
    encoded = tag.encode(self.encoding, "ignore")
    self.fd.write("</%s>" % encoded)
|
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/HtmlParser/htmllib.py#L137-L146
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.