file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–12.1k) | suffix (large_string, lengths 0–12k) | middle (large_string, lengths 0–7.51k) | fim_type (large_string, 4 classes)
---|---|---|---|---|
read.go
|
"twitter:description" {
if _, exist := mapAttribute[metaName]; !exist {
mapAttribute[metaName] = metaContent
}
return
}
if metaProperty == "og:description" ||
metaProperty == "og:image" ||
metaProperty == "og:title" {
if _, exist := mapAttribute[metaProperty]; !exist {
mapAttribute[metaProperty] = metaContent
}
return
}
})
// Set final image
if _, exist := mapAttribute["og:image"]; exist {
metadata.Image = mapAttribute["og:image"]
} else if _, exist := mapAttribute["twitter:image"]; exist {
metadata.Image = mapAttribute["twitter:image"]
}
if metadata.Image != "" && strings.HasPrefix(metadata.Image, "//") {
metadata.Image = "http:" + metadata.Image
}
// Set final description
if _, exist := mapAttribute["description"]; exist {
metadata.Excerpt = mapAttribute["description"]
} else if _, exist := mapAttribute["og:description"]; exist {
metadata.Excerpt = mapAttribute["og:description"]
} else if _, exist := mapAttribute["twitter:description"]; exist
|
// Set final title
metadata.Title = r.getArticleTitle(doc)
if metadata.Title == "" {
if _, exist := mapAttribute["og:title"]; exist {
metadata.Title = mapAttribute["og:title"]
} else if _, exist := mapAttribute["twitter:title"]; exist {
metadata.Title = mapAttribute["twitter:title"]
}
}
return metadata
}
// Get the article title
func (r *readability) getArticleTitle(doc *goquery.Document) string {
// Get title tag
title := doc.Find("title").First().Text()
title = normalizeText(title)
originalTitle := title
// Create list of separators
separators := []string{`|`, `-`, `\`, `/`, `>`, `»`}
hierarchialSeparators := []string{`\`, `/`, `>`, `»`}
// If there's a separator in the title, first remove the final part
titleHadHierarchicalSeparators := false
if idx, sep := findSeparator(title, separators...); idx != -1 {
titleHadHierarchicalSeparators = hasSeparator(title, hierarchialSeparators...)
index := strings.LastIndex(originalTitle, sep)
title = originalTitle[:index]
// If the resulting title is too short (3 words or fewer), remove
// the first part instead:
if len(strings.Fields(title)) < 3 {
index = strings.Index(originalTitle, sep)
title = originalTitle[index+1:]
}
} else if strings.Contains(title, ": ") {
// Check if we have a heading containing this exact string, so we
// could assume it's the full title.
existInHeading := false
doc.Find("h1,h2").EachWithBreak(func(_ int, heading *goquery.Selection) bool {
headingText := strings.TrimSpace(heading.Text())
if headingText == title {
existInHeading = true
return false
}
return true
})
// If we don't, let's extract the title out of the original title string.
if !existInHeading {
index := strings.LastIndex(originalTitle, ":")
title = originalTitle[index+1:]
// If the title is now too short, try the first colon instead:
if len(strings.Fields(title)) < 3 {
index = strings.Index(originalTitle, ":")
title = originalTitle[index+1:]
// But if we have too many words before the colon there's something weird
// with the titles and the H tags so let's just use the original title instead
} else {
index = strings.Index(originalTitle, ":")
title = originalTitle[:index]
if len(strings.Fields(title)) > 5 {
title = originalTitle
}
}
}
} else if strLen(title) > 150 || strLen(title) < 15 {
hOne := doc.Find("h1").First()
if hOne != nil {
title = normalizeText(hOne.Text())
}
}
// If we now have 4 words or fewer as our title, and either no
// 'hierarchical' separators (\, /, > or ») were found in the original
// title or we decreased the number of words by more than 1 word, use
// the original title.
curTitleWordCount := len(strings.Fields(title))
noSeparatorWordCount := len(strings.Fields(removeSeparator(originalTitle, separators...)))
if curTitleWordCount <= 4 && (!titleHadHierarchicalSeparators || curTitleWordCount != noSeparatorWordCount-1) {
title = originalTitle
}
return title
}
// Using a variety of metrics (content score, classname, element types), find the content that is
// most likely to be the stuff a user wants to read. Then return it wrapped up in a div.
func (r *readability) getArticleContent(doc *goquery.Document) *goquery.Selection {
// First, node prepping. Trash nodes that look cruddy (like ones with the
// class name "comment", etc), and turn divs into P tags where they have been
// used inappropriately (as in, where they contain no other block level elements.)
doc.Find("*").Each(func(i int, s *goquery.Selection) {
matchString := s.AttrOr("class", "") + " " + s.AttrOr("id", "")
// If byline, remove this element
if rel := s.AttrOr("rel", ""); rel == "author" || byline.MatchString(matchString) {
s.Remove()
return
}
// Remove unlikely candidates
if unlikelyCandidates.MatchString(matchString) &&
!okMaybeItsACandidate.MatchString(matchString) &&
!s.Is("body") && !s.Is("a") {
s.Remove()
return
}
if unlikelyElements.MatchString(r.getTagName(s)) {
s.Remove()
return
}
// Remove DIV, SECTION, and HEADER nodes without any content (e.g. text, image, video, or iframe).
if s.Is("div,section,header,h1,h2,h3,h4,h5,h6") && r.isElementEmpty(s) {
s.Remove()
return
}
// Turn all divs that don't have children block level elements into p's
if s.Is("div") {
sHTML, _ := s.Html()
if !divToPElements.MatchString(sHTML) {
s.Nodes[0].Data = "p"
}
}
})
// Loop through all paragraphs, and assign a score to them based on how content-y they look.
// Then add their score to their parent node.
// A score is determined by things like number of commas, class names, etc. Maybe eventually link density.
r.candidates = make(map[string]candidateItem)
doc.Find("p").Each(func(i int, s *goquery.Selection) {
// If this paragraph is less than 25 characters, don't even count it.
innerText := normalizeText(s.Text())
if strLen(innerText) < 25 {
return
}
// Exclude nodes with no ancestor.
ancestors := r.getNodeAncestors(s, 3)
if len(ancestors) == 0 {
return
}
// Calculate content score
// Add a point for the paragraph itself as a base.
contentScore := 1.0
// Add points for any commas within this paragraph.
contentScore += float64(strings.Count(innerText, ","))
contentScore += float64(strings.Count(innerText, "，"))
// For every 100 characters in this paragraph, add another point. Up to 3 points.
contentScore += math.Min(math.Floor(float64(strLen(innerText)/100)), 3)
// Initialize and score ancestors.
for level, ancestor := range ancestors {
// Node score divider:
// - parent: 1 (no division)
// - grandparent: 2
// - great grandparent+: ancestor level * 3
scoreDivider := 0
if level == 0 {
scoreDivider = 1
} else if level == 1 {
scoreDivider = 2
} else {
scoreDivider = level * 3
}
ancestorHash := hashStr(ancestor)
if _, ok := r.candidates[ancestorHash]; !ok {
r.candidates[ancestorHash] = r.initializeNodeScore(ancestor)
}
candidate := r.candidates[ancestorHash]
candidate.score += contentScore / float64(scoreDivider)
r.candidates[ancestorHash] = candidate
}
})
// After we've calculated scores, loop through all of the possible
// candidate nodes we found and find the one with the highest score.
var topCandidate *candidateItem
for hash, candidate := range r.candidates {
candidate.score
|
{
metadata.Excerpt = mapAttribute["twitter:description"]
}
|
conditional_block
|
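The row above shows how each example is laid out: `prefix` stops just before the masked span, `middle` holds the span itself (here the `else if` body, hence `fim_type = conditional_block`), and `suffix` holds the rest of the file. The preview shows four `fim_type` classes in total: `conditional_block`, `identifier_name`, `identifier_body`, and `random_line_split`. Below is a minimal sketch of turning such a row into a fill-in-the-middle training prompt; the `<PRE>`/`<SUF>`/`<MID>` sentinel strings are placeholders chosen for illustration, not tokens defined by this dataset.

```python
# Minimal sketch: assemble a FIM training prompt from one dataset row.
# The sentinel strings below are illustrative placeholders; a real tokenizer
# would define its own FIM special tokens.
PRE, SUF, MID = "<PRE>", "<SUF>", "<MID>"

def build_fim_prompt(row: dict) -> str:
    """Prefix-suffix-middle (PSM) layout: the model is asked to generate
    the `middle` given the surrounding `prefix` and `suffix`."""
    return f"{PRE}{row['prefix']}{SUF}{row['suffix']}{MID}{row['middle']}"

def reconstruct_source(row: dict) -> str:
    """Splicing the three columns back together reproduces the original
    file content, since the split is position-based."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Toy row shaped like the record above.
row = {
    "file_name": "read.go",
    "prefix": "} else if _, exist := mapAttribute[\"twitter:description\"]; exist ",
    "middle": "{\n\tmetadata.Excerpt = mapAttribute[\"twitter:description\"]\n}",
    "suffix": "\n// Set final title\n",
    "fim_type": "conditional_block",
}
print(build_fim_prompt(row))
assert reconstruct_source(row).startswith(row["prefix"])
```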
read.go
|
String(matchString) &&
!okMaybeItsACandidate.MatchString(matchString) &&
!s.Is("body") && !s.Is("a") {
s.Remove()
return
}
if unlikelyElements.MatchString(r.getTagName(s)) {
s.Remove()
return
}
// Remove DIV, SECTION, and HEADER nodes without any content (e.g. text, image, video, or iframe).
if s.Is("div,section,header,h1,h2,h3,h4,h5,h6") && r.isElementEmpty(s) {
s.Remove()
return
}
// Turn all divs that don't have children block level elements into p's
if s.Is("div") {
sHTML, _ := s.Html()
if !divToPElements.MatchString(sHTML) {
s.Nodes[0].Data = "p"
}
}
})
// Loop through all paragraphs, and assign a score to them based on how content-y they look.
// Then add their score to their parent node.
// A score is determined by things like number of commas, class names, etc. Maybe eventually link density.
r.candidates = make(map[string]candidateItem)
doc.Find("p").Each(func(i int, s *goquery.Selection) {
// If this paragraph is less than 25 characters, don't even count it.
innerText := normalizeText(s.Text())
if strLen(innerText) < 25 {
return
}
// Exclude nodes with no ancestor.
ancestors := r.getNodeAncestors(s, 3)
if len(ancestors) == 0 {
return
}
// Calculate content score
// Add a point for the paragraph itself as a base.
contentScore := 1.0
// Add points for any commas within this paragraph.
contentScore += float64(strings.Count(innerText, ","))
contentScore += float64(strings.Count(innerText, "，"))
// For every 100 characters in this paragraph, add another point. Up to 3 points.
contentScore += math.Min(math.Floor(float64(strLen(innerText)/100)), 3)
// Initialize and score ancestors.
for level, ancestor := range ancestors {
// Node score divider:
// - parent: 1 (no division)
// - grandparent: 2
// - great grandparent+: ancestor level * 3
scoreDivider := 0
if level == 0 {
scoreDivider = 1
} else if level == 1 {
scoreDivider = 2
} else {
scoreDivider = level * 3
}
ancestorHash := hashStr(ancestor)
if _, ok := r.candidates[ancestorHash]; !ok {
r.candidates[ancestorHash] = r.initializeNodeScore(ancestor)
}
candidate := r.candidates[ancestorHash]
candidate.score += contentScore / float64(scoreDivider)
r.candidates[ancestorHash] = candidate
}
})
// After we've calculated scores, loop through all of the possible
// candidate nodes we found and find the one with the highest score.
var topCandidate *candidateItem
for hash, candidate := range r.candidates {
candidate.score = candidate.score * (1 - r.getLinkDensity(candidate.node))
r.candidates[hash] = candidate
if topCandidate == nil || candidate.score > topCandidate.score {
if topCandidate == nil {
topCandidate = new(candidateItem)
}
topCandidate.score = candidate.score
topCandidate.node = candidate.node
}
}
// If top candidate not found, stop
if topCandidate == nil {
return nil
}
r.prepArticle(topCandidate.node)
return topCandidate.node
}
// Check if a node is empty
func (r *readability) isElementEmpty(s *goquery.Selection) bool {
html, _ := s.Html()
html = strings.TrimSpace(html)
return html == ""
}
// Get tag name from a node
func (r *readability) getTagName(s *goquery.Selection) string {
if s == nil || len(s.Nodes) == 0 {
return ""
}
return s.Nodes[0].Data
}
func (r *readability) getNodeAncestors(node *goquery.Selection, maxDepth int) []*goquery.Selection {
ancestors := []*goquery.Selection{}
parent := *node
for i := 0; i < maxDepth; i++ {
parent = *parent.Parent()
if len(parent.Nodes) == 0 {
return ancestors
}
ancestors = append(ancestors, &parent)
}
return ancestors
}
// Check if a given node has one of its ancestor tag name matching the provided one.
func (r *readability) hasAncestorTag(node *goquery.Selection, tag string) bool {
for parent := *node; len(parent.Nodes) > 0; parent = *parent.Parent() {
if parent.Nodes[0].Data == tag {
return true
}
}
return false
}
// Initialize a node and check the className/id for special names
// to add to its score.
func (r *readability) initializeNodeScore(node *goquery.Selection) candidateItem {
contentScore := 0.0
switch r.getTagName(node) {
case "article":
contentScore += 10
case "section":
contentScore += 8
case "div":
contentScore += 5
case "pre", "blockquote", "td":
contentScore += 3
case "form", "ol", "ul", "dl", "dd", "dt", "li", "address":
contentScore -= 3
case "th", "h1", "h2", "h3", "h4", "h5", "h6":
contentScore -= 5
}
contentScore += r.getClassWeight(node)
return candidateItem{contentScore, node}
}
// Get an element's class/id weight. Uses regular expressions to tell if this
// element looks good or bad.
func (r *readability) getClassWeight(node *goquery.Selection) float64 {
weight := 0.0
if str, b := node.Attr("class"); b {
if negative.MatchString(str) {
weight -= 25
}
if positive.MatchString(str) {
weight += 25
}
}
if str, b := node.Attr("id"); b {
if negative.MatchString(str) {
weight -= 25
}
if positive.MatchString(str) {
weight += 25
}
}
return weight
}
// Get the density of links as a percentage of the content
// This is the amount of text that is inside a link divided by the total text in the node.
func (r *readability) getLinkDensity(node *goquery.Selection) float64 {
if node == nil {
return 0
}
textLength := strLen(normalizeText(node.Text()))
if textLength == 0 {
return 0
}
linkLength := 0
node.Find("a").Each(func(_ int, link *goquery.Selection) {
linkLength += strLen(link.Text())
})
return float64(linkLength) / float64(textLength)
}
// Prepare the article node for display. Clean out any inline styles,
// iframes, forms, strip extraneous <p> tags, etc.
func (r *readability) prepArticle(content *goquery.Selection) {
if content == nil {
return
}
// Remove styling attribute
r.cleanStyle(content)
// Clean out junk from the article content
r.cleanConditionally(content, "form")
r.cleanConditionally(content, "fieldset")
r.clean(content, "h1")
r.clean(content, "object")
r.clean(content, "embed")
r.clean(content, "footer")
r.clean(content, "link")
// If there is only one h2 or h3 and its text content substantially equals article title,
// they are probably using it as a header and not a subheader,
// so remove it since we already extract the title separately.
if content.Find("h2").Length() == 1 {
r.clean(content, "h2")
}
if content.Find("h3").Length() == 1 {
r.clean(content, "h3")
}
r.clean(content, "iframe")
r.clean(content, "input")
r.clean(content, "textarea")
r.clean(content, "select")
r.clean(content, "button")
r.cleanHeaders(content)
// Do these last as the previous stuff may have removed junk
// that will affect these
r.cleanConditionally(content, "table")
r.cleanConditionally(content, "ul")
r.cleanConditionally(content, "div")
// Fix all relative URL
r.fixRelativeURIs(content)
// Last time, clean all empty tags and remove class name
content.Find("*").Each(func(_ int, s *goquery.Selection) {
if r.isElementEmpty(s) {
s.Remove()
}
s.RemoveAttr("class")
s.RemoveAttr("id")
})
}
// Remove the style attribute on every e and under.
func (r *readability) clean
|
Style(s *g
|
identifier_name
|
|
read.go
|
ancestors = append(ancestors, &parent)
}
return ancestors
}
// Check if a given node has one of its ancestor tag name matching the provided one.
func (r *readability) hasAncestorTag(node *goquery.Selection, tag string) bool {
for parent := *node; len(parent.Nodes) > 0; parent = *parent.Parent() {
if parent.Nodes[0].Data == tag {
return true
}
}
return false
}
// Initialize a node and check the className/id for special names
// to add to its score.
func (r *readability) initializeNodeScore(node *goquery.Selection) candidateItem {
contentScore := 0.0
switch r.getTagName(node) {
case "article":
contentScore += 10
case "section":
contentScore += 8
case "div":
contentScore += 5
case "pre", "blockquote", "td":
contentScore += 3
case "form", "ol", "ul", "dl", "dd", "dt", "li", "address":
contentScore -= 3
case "th", "h1", "h2", "h3", "h4", "h5", "h6":
contentScore -= 5
}
contentScore += r.getClassWeight(node)
return candidateItem{contentScore, node}
}
// Get an element's class/id weight. Uses regular expressions to tell if this
// element looks good or bad.
func (r *readability) getClassWeight(node *goquery.Selection) float64 {
weight := 0.0
if str, b := node.Attr("class"); b {
if negative.MatchString(str) {
weight -= 25
}
if positive.MatchString(str) {
weight += 25
}
}
if str, b := node.Attr("id"); b {
if negative.MatchString(str) {
weight -= 25
}
if positive.MatchString(str) {
weight += 25
}
}
return weight
}
// Get the density of links as a percentage of the content
// This is the amount of text that is inside a link divided by the total text in the node.
func (r *readability) getLinkDensity(node *goquery.Selection) float64 {
if node == nil {
return 0
}
textLength := strLen(normalizeText(node.Text()))
if textLength == 0 {
return 0
}
linkLength := 0
node.Find("a").Each(func(_ int, link *goquery.Selection) {
linkLength += strLen(link.Text())
})
return float64(linkLength) / float64(textLength)
}
// Prepare the article node for display. Clean out any inline styles,
// iframes, forms, strip extraneous <p> tags, etc.
func (r *readability) prepArticle(content *goquery.Selection) {
if content == nil {
return
}
// Remove styling attribute
r.cleanStyle(content)
// Clean out junk from the article content
r.cleanConditionally(content, "form")
r.cleanConditionally(content, "fieldset")
r.clean(content, "h1")
r.clean(content, "object")
r.clean(content, "embed")
r.clean(content, "footer")
r.clean(content, "link")
// If there is only one h2 or h3 and its text content substantially equals article title,
// they are probably using it as a header and not a subheader,
// so remove it since we already extract the title separately.
if content.Find("h2").Length() == 1 {
r.clean(content, "h2")
}
if content.Find("h3").Length() == 1 {
r.clean(content, "h3")
}
r.clean(content, "iframe")
r.clean(content, "input")
r.clean(content, "textarea")
r.clean(content, "select")
r.clean(content, "button")
r.cleanHeaders(content)
// Do these last as the previous stuff may have removed junk
// that will affect these
r.cleanConditionally(content, "table")
r.cleanConditionally(content, "ul")
r.cleanConditionally(content, "div")
// Fix all relative URL
r.fixRelativeURIs(content)
// Last time, clean all empty tags and remove class name
content.Find("*").Each(func(_ int, s *goquery.Selection) {
if r.isElementEmpty(s) {
s.Remove()
}
s.RemoveAttr("class")
s.RemoveAttr("id")
})
}
// Remove the style attribute on every e and under.
func (r *readability) cleanStyle(s *goquery.Selection) {
s.Find("*").Each(func(i int, s1 *goquery.Selection) {
tagName := s1.Nodes[0].Data
if tagName == "svg" {
return
}
s1.RemoveAttr("align")
s1.RemoveAttr("background")
s1.RemoveAttr("bgcolor")
s1.RemoveAttr("border")
s1.RemoveAttr("cellpadding")
s1.RemoveAttr("cellspacing")
s1.RemoveAttr("frame")
s1.RemoveAttr("hspace")
s1.RemoveAttr("rules")
s1.RemoveAttr("style")
s1.RemoveAttr("valign")
s1.RemoveAttr("vspace")
s1.RemoveAttr("onclick")
s1.RemoveAttr("onmouseover")
s1.RemoveAttr("border")
s1.RemoveAttr("style")
if tagName != "table" && tagName != "th" && tagName != "td" &&
tagName != "hr" && tagName != "pre" {
s1.RemoveAttr("width")
s1.RemoveAttr("height")
}
})
}
// Clean a node of all elements of type "tag".
// (Unless it's a youtube/vimeo video. People love movies.)
func (r *readability) clean(s *goquery.Selection, tag string) {
if s == nil {
return
}
isEmbed := false
if tag == "object" || tag == "embed" || tag == "iframe" {
isEmbed = true
}
s.Find(tag).Each(func(i int, target *goquery.Selection) {
attributeValues := ""
for _, attribute := range target.Nodes[0].Attr {
attributeValues += " " + attribute.Val
}
if isEmbed && videos.MatchString(attributeValues) {
return
}
if isEmbed && videos.MatchString(target.Text()) {
return
}
target.Remove()
})
}
// Clean an element of all tags of type "tag" if they look fishy.
// "Fishy" is an algorithm based on content length, classnames, link density, number of images & embeds, etc.
func (r *readability) cleanConditionally(e *goquery.Selection, tag string) {
if e == nil {
return
}
isList := tag == "ul" || tag == "ol"
e.Find(tag).Each(func(i int, node *goquery.Selection) {
contentScore := 0.0
weight := r.getClassWeight(node)
if weight+contentScore < 0 {
node.Remove()
return
}
// If there are not very many commas, and the number of
// non-paragraph elements is more than paragraphs or other
// ominous signs, remove the element.
nodeText := normalizeText(node.Text())
nCommas := strings.Count(nodeText, ",")
nCommas += strings.Count(nodeText, "，")
if nCommas < 10 {
p := node.Find("p").Length()
img := node.Find("img").Length()
li := node.Find("li").Length() - 100
input := node.Find("input").Length()
embedCount := 0
node.Find("embed").Each(func(i int, embed *goquery.Selection) {
if !videos.MatchString(embed.AttrOr("src", "")) {
embedCount++
}
})
linkDensity := r.getLinkDensity(node)
contentLength := strLen(normalizeText(node.Text()))
haveToRemove := (!isList && li > p) ||
(img > 1 && float64(p)/float64(img) < 0.5 && !r.hasAncestorTag(node, "figure")) ||
(float64(input) > math.Floor(float64(p)/3)) ||
(!isList && contentLength < 25 && (img == 0 || img > 2) && !r.hasAncestorTag(node, "figure")) ||
(!isList && weight < 25 && linkDensity > 0.2) ||
(weight >= 25 && linkDensity > 0.5) ||
((embedCount == 1 && contentLength < 75) || embedCount > 1)
if haveToRemove {
node.Remove()
}
}
})
}
// Clean out spurious headers from an Element. Checks things like classnames and link density.
func (r *readability) cleanHeaders(s *goquery.Selection) {
s.Fi
|
nd("h1,h2,h3").Each(func(_ int, s1 *goquery.Selection) {
if r.getClassWeight(s1) < 0 {
s1.Remove()
}
})
}
// Co
|
identifier_body
|
|
Model.py
|
def initialize(self):
self.build_CNN()
self.build_RNN()
self.build_CTC()
self.trained_batches = 0
self.learning_rate = tf.placeholder(tf.float32, shape=[])
self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(self.update_ops):
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss_batch)
self.sess = tf.Session()
self.saver = tf.train.Saver(max_to_keep=1)
model = tf.train.latest_checkpoint(self.path_model)
if self.is_restore and not model:
raise Exception('Model Not found')
# load saved model if available
if model:
print('Restoring Model ' + model)
self.saver.restore(self.sess, model)
else:
print('New Model')
self.sess.run(tf.global_variables_initializer())
def save(self):
self.model_id += 1
self.saver.save(self.sess, self.path_model + 'model', global_step=self.model_id)
def build_CNN(self):
cnn_input_4d = tf.expand_dims(input=self.input_images, axis=3) # adds dimensions of size 1 to the 3rd index
pool = cnn_input_4d
pool = self.create_CNN_layer(pool, filter_size=5, in_features=1, out_features=32, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=5, in_features=32, out_features=64, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=64, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=256, max_pool=(1, 2))
self.cnn_output_4d = pool
def create_CNN_layer(self, pool, filter_size, in_features, out_features, max_pool):
# initialize weights
filter = tf.Variable(tf.truncated_normal([filter_size, filter_size, in_features, out_features], stddev=0.1))
conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1))
conv_norm = tf.layers.batch_normalization(conv, training=self.is_train)
relu = tf.nn.relu(conv_norm)
pool = tf.nn.max_pool(relu,
ksize=(1, max_pool[0], max_pool[1], 1),
strides=(1, max_pool[0], max_pool[1], 1),
padding='VALID')
# layer 1
# filter = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
# conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1)) # strides=[1, 1, 1, 1], the filter window will move 1 batch, 1 height pixel, 1 width pixel and 1 color pixel
# relu = tf.nn.relu(conv)
# pool = tf.nn.max_pool(relu, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID')
return pool
def build_RNN(self):
rnn_input_3d = tf.squeeze(input=self.cnn_output_4d, axis=[2]) # reduces the dimension by deleting 2nd index
# define no. of cells & layers to build
n_cell = 256
n_layers = 2
cells = []
for _ in range(n_layers):
cells.append(tf.contrib.rnn.LSTMCell(num_units=n_cell, state_is_tuple=True))
# combine the 2 simple LSTM cells sequentially
cell_multi = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
((fw, bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_multi,
cell_bw=cell_multi,
inputs=rnn_input_3d,
dtype=rnn_input_3d.dtype)
rnn_combined = tf.concat([fw, bw], 2) # combine the fw & bw
rnn = tf.expand_dims(rnn_combined, 2) # adds dimensions of size 1 to the 2nd index
features_in = n_cell * 2 # no. of input
features_out = len(self.char_list) + 1 # no. of output, characters + blank space
kernel = tf.Variable(tf.truncated_normal([1, 1, features_in, features_out], stddev=0.1))
rnn = tf.nn.atrous_conv2d(value=rnn, filters=kernel, rate=1, padding='SAME')
self.rnn_output_3d = tf.squeeze(rnn, axis=[2]) # reduces the dimension by deleting 2nd index
def build_CTC(self):
# transform the rnn_output dimension
self.ctc_input_3d = tf.transpose(self.rnn_output_3d, [1, 0, 2])
# transform label to tensor
self.labels = tf.SparseTensor(tf.placeholder(tf.int64, shape=[None, 2]),
tf.placeholder(tf.int32, [None]),
tf.placeholder(tf.int64, [2]))
self.seq_length = tf.placeholder(tf.int32, [None])
# calculate the loss & return the mean
loss_batch_mean = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_3d,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
self.loss_batch = tf.reduce_mean(loss_batch_mean)
self.ctc_input_element = tf.placeholder(tf.float32, shape=[self.text_length, None, len(self.char_list) + 1])
# calculate the loss per each element to find the label score
self.loss_element = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_element,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
if self.decoder_selected == Constants.decoder_best_path:
print("Decoder Greedy")
self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctc_input_3d, sequence_length=self.seq_length)
elif self.decoder_selected == Constants.decoder_word_beam:
print("Decoder Word Beam")
self.load_word_beam()
def load_word_beam(self):
word_beam_search_module = tf.load_op_library(self.file_word_beam_search)
chars = str().join(self.char_list)
word_chars = open(self.file_word_char_list).read().splitlines()[0]
data_handler = DataHandler()
data_handler.prepare_collection_words()
collection_words = open(self.file_collection_words).read()
# decode the recognized word against the provided address dictionary
self.decoder = word_beam_search_module.word_beam_search(
tf.nn.softmax(self.ctc_input_3d, dim=2),
50, # batch size
'Words', # sentence or word
0.0, # smoothing
collection_words.encode('utf8'),
chars.encode('utf8'),
word_chars.encode('utf8'))
def encode(self, texts):
"transform labels to sparse tensor"
indices = []
values = []
shape = [len(texts), 0]
# iterate over the labels (texts)
for (batch_element, text) in enumerate(texts):
label_list = []
for c in text:
character = self.char_list.index(c)
label_list.append(character)
# check label list length and assign it to shape array
if len(label_list) > shape[1]:
shape[1] = len(label_list)
# transform label to tensor
for (i, label) in enumerate(label_list):
indices.append([batch_element, i])
values.append(label)
return (indices, values, shape)
def decode(self, ctc_output, batch_size):
"transform sparse tensor to labels"
encoded_label_list = [] # store batch elements labels
for i in range(batch_size):
encoded_label_list.append([])
blank = len(self.char_list) # last char is a blank
# transform tensor to char indexes
for j in range(batch_size):
for label in ctc_output[j
|
def __init__(self, char_list, restore=False):
self.decoder_selected = Constants.decoder_selected
self.path_model = Constants.path_model
self.batch_size = Constants.batch_size
self.char_list = char_list
self.learning_rate = Constants.learning_rate
self.text_length = Constants.text_length
self.img_size = Constants.img_size
self.file_word_char_list = Constants.file_word_char_list
self.file_word_beam_search = Constants.file_word_beam_search
self.file_collection_words = Constants.file_collection_words
self.is_restore = restore
self.model_id = 0
self.is_train = tf.placeholder(tf.bool, name='is_train')
self.input_images = tf.placeholder(tf.float32, shape=(None, self.img_size[0], self.img_size[1]))
self.initialize()
|
identifier_body
|
|
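The `encode` method shown above converts text labels into the `(indices, values, shape)` triple that a sparse label tensor for CTC loss is built from. The sketch below mirrors that logic in plain Python (no TensorFlow) so the resulting sparse layout is easy to inspect; the alphabet and labels are made up for illustration.

```python
# Plain-Python mirror of Model.encode(): build the (indices, values, shape)
# triple used to construct a sparse label tensor for CTC loss.
def encode(texts, char_list):
    indices, values = [], []
    shape = [len(texts), 0]                 # [batch size, max label length]
    for batch_element, text in enumerate(texts):
        label_list = [char_list.index(c) for c in text]
        shape[1] = max(shape[1], len(label_list))
        for i, label in enumerate(label_list):
            indices.append([batch_element, i])
            values.append(label)
    return indices, values, shape

# Toy alphabet and labels, chosen only to show the output format.
indices, values, shape = encode(["ab", "c"], char_list="abc")
assert indices == [[0, 0], [0, 1], [1, 0]]
assert values == [0, 1, 2]
assert shape == [2, 2]
```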
Model.py
|
self.file_word_beam_search = Constants.file_word_beam_search
self.file_collection_words = Constants.file_collection_words
self.is_restore = restore
self.model_id = 0
self.is_train = tf.placeholder(tf.bool, name='is_train')
self.input_images = tf.placeholder(tf.float32, shape=(None, self.img_size[0], self.img_size[1]))
self.initialize()
def initialize(self):
self.build_CNN()
self.build_RNN()
self.build_CTC()
self.trained_batches = 0
self.learning_rate = tf.placeholder(tf.float32, shape=[])
self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(self.update_ops):
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss_batch)
self.sess = tf.Session()
self.saver = tf.train.Saver(max_to_keep=1)
model = tf.train.latest_checkpoint(self.path_model)
if self.is_restore and not model:
raise Exception('Model Not found')
# load saved model if available
if model:
print('Restoring Model ' + model)
self.saver.restore(self.sess, model)
else:
print('New Model')
self.sess.run(tf.global_variables_initializer())
def save(self):
self.model_id += 1
self.saver.save(self.sess, self.path_model + 'model', global_step=self.model_id)
def build_CNN(self):
cnn_input_4d = tf.expand_dims(input=self.input_images, axis=3) # adds dimensions of size 1 to the 3rd index
pool = cnn_input_4d
pool = self.create_CNN_layer(pool, filter_size=5, in_features=1, out_features=32, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=5, in_features=32, out_features=64, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=64, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=256, max_pool=(1, 2))
self.cnn_output_4d = pool
def create_CNN_layer(self, pool, filter_size, in_features, out_features, max_pool):
# initialize weights
filter = tf.Variable(tf.truncated_normal([filter_size, filter_size, in_features, out_features], stddev=0.1))
conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1))
conv_norm = tf.layers.batch_normalization(conv, training=self.is_train)
relu = tf.nn.relu(conv_norm)
pool = tf.nn.max_pool(relu,
ksize=(1, max_pool[0], max_pool[1], 1),
strides=(1, max_pool[0], max_pool[1], 1),
padding='VALID')
# layer 1
# filter = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
# conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1)) # strides=[1, 1, 1, 1], the filter window will move 1 batch, 1 height pixel, 1 width pixel and 1 color pixel
# relu = tf.nn.relu(conv)
# pool = tf.nn.max_pool(relu, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID')
return pool
def build_RNN(self):
rnn_input_3d = tf.squeeze(input=self.cnn_output_4d, axis=[2]) # reduces the dimension by deleting 2nd index
# define no. of cells & layers to build
n_cell = 256
n_layers = 2
cells = []
for _ in range(n_layers):
cells.append(tf.contrib.rnn.LSTMCell(num_units=n_cell, state_is_tuple=True))
# combine the 2 simple LSTM cells sequentially
|
dtype=rnn_input_3d.dtype)
rnn_combined = tf.concat([fw, bw], 2) # combine the fw & bw
rnn = tf.expand_dims(rnn_combined, 2) # adds dimensions of size 1 to the 2nd index
features_in = n_cell * 2 # no. of input
features_out = len(self.char_list) + 1 # no. of output, characters + blank space
kernel = tf.Variable(tf.truncated_normal([1, 1, features_in, features_out], stddev=0.1))
rnn = tf.nn.atrous_conv2d(value=rnn, filters=kernel, rate=1, padding='SAME')
self.rnn_output_3d = tf.squeeze(rnn, axis=[2]) # reduces the dimension by deleting 2nd index
def build_CTC(self):
# transform the rnn_output dimension
self.ctc_input_3d = tf.transpose(self.rnn_output_3d, [1, 0, 2])
# transform label to tensor
self.labels = tf.SparseTensor(tf.placeholder(tf.int64, shape=[None, 2]),
tf.placeholder(tf.int32, [None]),
tf.placeholder(tf.int64, [2]))
self.seq_length = tf.placeholder(tf.int32, [None])
# calculate the loss & return the mean
loss_batch_mean = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_3d,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
self.loss_batch = tf.reduce_mean(loss_batch_mean)
self.ctc_input_element = tf.placeholder(tf.float32, shape=[self.text_length, None, len(self.char_list) + 1])
# calculate the loss per each element to find the label score
self.loss_element = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_element,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
if self.decoder_selected == Constants.decoder_best_path:
print("Decoder Greedy")
self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctc_input_3d, sequence_length=self.seq_length)
elif self.decoder_selected == Constants.decoder_word_beam:
print("Decoder Word Beam")
self.load_word_beam()
def load_word_beam(self):
word_beam_search_module = tf.load_op_library(self.file_word_beam_search)
chars = str().join(self.char_list)
word_chars = open(self.file_word_char_list).read().splitlines()[0]
data_handler = DataHandler()
data_handler.prepare_collection_words()
collection_words = open(self.file_collection_words).read()
# decode the recognized word against the provided address dictionary
self.decoder = word_beam_search_module.word_beam_search(
tf.nn.softmax(self.ctc_input_3d, dim=2),
50, # batch size
'Words', # sentence or word
0.0, # smoothing
collection_words.encode('utf8'),
chars.encode('utf8'),
word_chars.encode('utf8'))
def encode(self, texts):
"transform labels to sparse tensor"
indices = []
values = []
shape = [len(texts), 0]
# iterate over the labels (texts)
for (batch_element, text) in enumerate(texts):
label_list = []
for c in text:
character = self.char_list.index(c)
label_list.append(character)
# check label list length and assign it to shape array
if len(label_list) > shape[1]:
shape[1] = len(label_list)
# transform label to tensor
for (i, label) in enumerate(label_list):
indices.append([batch_element, i])
values.append(label)
return (indices, values, shape)
def decode(self, ctc_output, batch_size):
"transform sparse tensor to labels"
encoded_label_list = [] # store batch elements labels
for i in range(batch_size):
encoded_label_list.append([])
blank = len(self.char_list) # last char is a blank
# transform tensor to char indexes
for j in range(batch_size):
for label in ctc_output[j]:
if label == blank:
break
encoded_label_list[j].append(label)
# convert char indexes to words
word_list = []
for label in encoded_label_list:
word = []
for c in label:
char = self.char_list[c]
word.append(char)
word_list.append(str().join(word))
return word_list
def batch_train(self, batch):
n_batch_elements
|
cell_multi = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
((fw, bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_multi,
cell_bw=cell_multi,
inputs=rnn_input_3d,
|
random_line_split
|
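The `decode` method shown above does the reverse mapping: for each batch element it walks the decoder output, stops at the blank index (`len(char_list)`), and maps the remaining label indices back to characters. A small standalone sketch of that loop, using a toy `char_list`, is given below.

```python
# Plain-Python mirror of Model.decode(): turn per-element label indices
# back into strings, cutting each sequence at the blank label.
def decode(ctc_output, char_list):
    blank = len(char_list)                  # blank is the last class
    words = []
    for labels in ctc_output:
        chars = []
        for label in labels:
            if label == blank:
                break
            chars.append(char_list[label])
        words.append("".join(chars))
    return words

# Toy decoder output for a batch of two elements.
assert decode([[0, 1, 3, 2], [2, 3]], char_list="abc") == ["ab", "c"]
```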
Model.py
|
.file_word_beam_search = Constants.file_word_beam_search
self.file_collection_words = Constants.file_collection_words
self.is_restore = restore
self.model_id = 0
self.is_train = tf.placeholder(tf.bool, name='is_train')
self.input_images = tf.placeholder(tf.float32, shape=(None, self.img_size[0], self.img_size[1]))
self.initialize()
def initialize(self):
self.build_CNN()
self.build_RNN()
self.build_CTC()
self.trained_batches = 0
self.learning_rate = tf.placeholder(tf.float32, shape=[])
self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(self.update_ops):
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss_batch)
self.sess = tf.Session()
self.saver = tf.train.Saver(max_to_keep=1)
model = tf.train.latest_checkpoint(self.path_model)
if self.is_restore and not model:
raise Exception('Model Not found')
# load saved model if available
if model:
print('Restoring Model ' + model)
self.saver.restore(self.sess, model)
else:
print('New Model')
self.sess.run(tf.global_variables_initializer())
def save(self):
self.model_id += 1
self.saver.save(self.sess, self.path_model + 'model', global_step=self.model_id)
def build_CNN(self):
cnn_input_4d = tf.expand_dims(input=self.input_images, axis=3) # adds dimensions of size 1 to the 3rd index
pool = cnn_input_4d
pool = self.create_CNN_layer(pool, filter_size=5, in_features=1, out_features=32, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=5, in_features=32, out_features=64, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=64, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=256, max_pool=(1, 2))
self.cnn_output_4d = pool
def create_CNN_layer(self, pool, filter_size, in_features, out_features, max_pool):
# initialize weights
filter = tf.Variable(tf.truncated_normal([filter_size, filter_size, in_features, out_features], stddev=0.1))
conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1))
conv_norm = tf.layers.batch_normalization(conv, training=self.is_train)
relu = tf.nn.relu(conv_norm)
pool = tf.nn.max_pool(relu,
ksize=(1, max_pool[0], max_pool[1], 1),
strides=(1, max_pool[0], max_pool[1], 1),
padding='VALID')
# layer 1
# filter = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
# conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1)) # strides=[1, 1, 1, 1], the filter window will move 1 batch, 1 height pixel, 1 width pixel and 1 color pixel
# relu = tf.nn.relu(conv)
# pool = tf.nn.max_pool(relu, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID')
return pool
def build_RNN(self):
rnn_input_3d = tf.squeeze(input=self.cnn_output_4d, axis=[2]) # reduces the dimension by deleting 2nd index
# define no. of cells & layers to build
n_cell = 256
n_layers = 2
cells = []
for _ in range(n_layers):
cells.append(tf.contrib.rnn.LSTMCell(num_units=n_cell, state_is_tuple=True))
# combine the 2 simple LSTM cells sequentially
cell_multi = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
((fw, bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_multi,
cell_bw=cell_multi,
inputs=rnn_input_3d,
dtype=rnn_input_3d.dtype)
rnn_combined = tf.concat([fw, bw], 2) # combine the fw & bw
rnn = tf.expand_dims(rnn_combined, 2) # adds dimensions of size 1 to the 2nd index
features_in = n_cell * 2 # no. of input
features_out = len(self.char_list) + 1 # no. of output, characters + blank space
kernel = tf.Variable(tf.truncated_normal([1, 1, features_in, features_out], stddev=0.1))
rnn = tf.nn.atrous_conv2d(value=rnn, filters=kernel, rate=1, padding='SAME')
self.rnn_output_3d = tf.squeeze(rnn, axis=[2]) # reduces the dimension by deleting 2nd index
def build_CTC(self):
# transform the rnn_output dimension
self.ctc_input_3d = tf.transpose(self.rnn_output_3d, [1, 0, 2])
# transform label to tensor
self.labels = tf.SparseTensor(tf.placeholder(tf.int64, shape=[None, 2]),
tf.placeholder(tf.int32, [None]),
tf.placeholder(tf.int64, [2]))
self.seq_length = tf.placeholder(tf.int32, [None])
# calculate the loss & return the mean
loss_batch_mean = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_3d,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
self.loss_batch = tf.reduce_mean(loss_batch_mean)
self.ctc_input_element = tf.placeholder(tf.float32, shape=[self.text_length, None, len(self.char_list) + 1])
# calculate the loss per each element to find the label score
self.loss_element = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_element,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
if self.decoder_selected == Constants.decoder_best_path:
print("Decoder Greedy")
self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctc_input_3d, sequence_length=self.seq_length)
elif self.decoder_selected == Constants.decoder_word_beam:
print("Decoder Word Beam")
self.load_word_beam()
def
|
(self):
word_beam_search_module = tf.load_op_library(self.file_word_beam_search)
chars = str().join(self.char_list)
word_chars = open(self.file_word_char_list).read().splitlines()[0]
data_handler = DataHandler()
data_handler.prepare_collection_words()
collection_words = open(self.file_collection_words).read()
# decode the recognized word against the provided address dictionary
self.decoder = word_beam_search_module.word_beam_search(
tf.nn.softmax(self.ctc_input_3d, dim=2),
50, # batch size
'Words', # sentence or word
0.0, # smoothing
collection_words.encode('utf8'),
chars.encode('utf8'),
word_chars.encode('utf8'))
def encode(self, texts):
"transform labels to sparse tensor"
indices = []
values = []
shape = [len(texts), 0]
# iterate over the labels (texts)
for (batch_element, text) in enumerate(texts):
label_list = []
for c in text:
character = self.char_list.index(c)
label_list.append(character)
# check label list length and assign it to shape array
if len(label_list) > shape[1]:
shape[1] = len(label_list)
# transform label to tensor
for (i, label) in enumerate(label_list):
indices.append([batch_element, i])
values.append(label)
return (indices, values, shape)
def decode(self, ctc_output, batch_size):
"transform sparse tensor to labels"
encoded_label_list = [] # store batch elements labels
for i in range(batch_size):
encoded_label_list.append([])
blank = len(self.char_list) # last char is a blank
# transform tensor to char indexes
for j in range(batch_size):
for label in ctc_output[j]:
if label == blank:
break
encoded_label_list[j].append(label)
# convert char indexes to words
word_list = []
for label in encoded_label_list:
word = []
for c in label:
char = self.char_list[c]
word.append(char)
word_list.append(str().join(word))
return word_list
def batch_train(self, batch):
n_batch
|
load_word_beam
|
identifier_name
|
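In the row above the masked `middle` is just the method name (`load_word_beam`), which is what the `identifier_name` class denotes: the prefix ends at `def ` and the suffix resumes at `(self):`. Below is a hedged sketch of how such completions might be scored with a simple stripped exact match; the scoring scheme is an assumption made for illustration, not something the dataset prescribes.

```python
# Illustrative scoring for identifier_name examples: compare a generated
# middle against the reference identifier. This metric is an assumption
# made for the sketch, not part of the dataset.
def exact_match(generated: str, reference: str) -> bool:
    return generated.strip() == reference.strip()

completions = ["load_word_beam", "load_word_beam ", "build_decoder"]
reference = "load_word_beam"
assert [exact_match(c, reference) for c in completions] == [True, True, False]
```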
Model.py
|
.file_word_beam_search = Constants.file_word_beam_search
self.file_collection_words = Constants.file_collection_words
self.is_restore = restore
self.model_id = 0
self.is_train = tf.placeholder(tf.bool, name='is_train')
self.input_images = tf.placeholder(tf.float32, shape=(None, self.img_size[0], self.img_size[1]))
self.initialize()
def initialize(self):
self.build_CNN()
self.build_RNN()
self.build_CTC()
self.trained_batches = 0
self.learning_rate = tf.placeholder(tf.float32, shape=[])
self.update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(self.update_ops):
self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate).minimize(self.loss_batch)
self.sess = tf.Session()
self.saver = tf.train.Saver(max_to_keep=1)
model = tf.train.latest_checkpoint(self.path_model)
if self.is_restore and not model:
raise Exception('Model Not found')
# load saved model if available
if model:
print('Restoring Model ' + model)
self.saver.restore(self.sess, model)
else:
print('New Model')
self.sess.run(tf.global_variables_initializer())
def save(self):
self.model_id += 1
self.saver.save(self.sess, self.path_model + 'model', global_step=self.model_id)
def build_CNN(self):
cnn_input_4d = tf.expand_dims(input=self.input_images, axis=3) # adds dimensions of size 1 to the 3rd index
pool = cnn_input_4d
pool = self.create_CNN_layer(pool, filter_size=5, in_features=1, out_features=32, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=5, in_features=32, out_features=64, max_pool=(2, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=64, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=128, max_pool=(1, 2))
pool = self.create_CNN_layer(pool, filter_size=3, in_features=128, out_features=256, max_pool=(1, 2))
self.cnn_output_4d = pool
def create_CNN_layer(self, pool, filter_size, in_features, out_features, max_pool):
# initialize weights
filter = tf.Variable(tf.truncated_normal([filter_size, filter_size, in_features, out_features], stddev=0.1))
conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1))
conv_norm = tf.layers.batch_normalization(conv, training=self.is_train)
relu = tf.nn.relu(conv_norm)
pool = tf.nn.max_pool(relu,
ksize=(1, max_pool[0], max_pool[1], 1),
strides=(1, max_pool[0], max_pool[1], 1),
padding='VALID')
# layer 1
# filter = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
# conv = tf.nn.conv2d(input=pool, filter=filter, padding='SAME', strides=(1, 1, 1, 1)) # strides=[1, 1, 1, 1], the filter window will move 1 batch, 1 height pixel, 1 width pixel and 1 color pixel
# relu = tf.nn.relu(conv)
# pool = tf.nn.max_pool(relu, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1), padding='VALID')
return pool
def build_RNN(self):
rnn_input_3d = tf.squeeze(input=self.cnn_output_4d, axis=[2]) # reduces the dimension by deleting 2nd index
# define no. of cells & layers to build
n_cell = 256
n_layers = 2
cells = []
for _ in range(n_layers):
cells.append(tf.contrib.rnn.LSTMCell(num_units=n_cell, state_is_tuple=True))
# combine the 2 simple LSTM cells sequentially
cell_multi = tf.contrib.rnn.MultiRNNCell(cells, state_is_tuple=True)
((fw, bw), _) = tf.nn.bidirectional_dynamic_rnn(cell_fw=cell_multi,
cell_bw=cell_multi,
inputs=rnn_input_3d,
dtype=rnn_input_3d.dtype)
rnn_combined = tf.concat([fw, bw], 2) # combine the fw & bw
rnn = tf.expand_dims(rnn_combined, 2) # adds dimensions of size 1 to the 2nd index
features_in = n_cell * 2 # no. of input
features_out = len(self.char_list) + 1 # no. of output, characters + blank space
kernel = tf.Variable(tf.truncated_normal([1, 1, features_in, features_out], stddev=0.1))
rnn = tf.nn.atrous_conv2d(value=rnn, filters=kernel, rate=1, padding='SAME')
self.rnn_output_3d = tf.squeeze(rnn, axis=[2]) # reduces the dimension by deleting 2nd index
def build_CTC(self):
# transform the rnn_output dimension
self.ctc_input_3d = tf.transpose(self.rnn_output_3d, [1, 0, 2])
# transform label to tensor
self.labels = tf.SparseTensor(tf.placeholder(tf.int64, shape=[None, 2]),
tf.placeholder(tf.int32, [None]),
tf.placeholder(tf.int64, [2]))
self.seq_length = tf.placeholder(tf.int32, [None])
# calculate the loss & return the mean
loss_batch_mean = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_3d,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
self.loss_batch = tf.reduce_mean(loss_batch_mean)
self.ctc_input_element = tf.placeholder(tf.float32, shape=[self.text_length, None, len(self.char_list) + 1])
# calculate the loss per each element to find the label score
self.loss_element = tf.nn.ctc_loss(labels=self.labels,
inputs=self.ctc_input_element,
sequence_length=self.seq_length,
ctc_merge_repeated=True)
if self.decoder_selected == Constants.decoder_best_path:
print("Decoder Greedy")
self.decoder = tf.nn.ctc_greedy_decoder(inputs=self.ctc_input_3d, sequence_length=self.seq_length)
elif self.decoder_selected == Constants.decoder_word_beam:
print("Decoder Word Beam")
self.load_word_beam()
def load_word_beam(self):
word_beam_search_module = tf.load_op_library(self.file_word_beam_search)
chars = str().join(self.char_list)
word_chars = open(self.file_word_char_list).read().splitlines()[0]
data_handler = DataHandler()
data_handler.prepare_collection_words()
collection_words = open(self.file_collection_words).read()
# decode the recognized word against the provided address dictionary
self.decoder = word_beam_search_module.word_beam_search(
tf.nn.softmax(self.ctc_input_3d, dim=2),
50, # batch size
'Words', # sentence or word
0.0, # smoothing
collection_words.encode('utf8'),
chars.encode('utf8'),
word_chars.encode('utf8'))
def encode(self, texts):
"transform labels to sparse tensor"
indices = []
values = []
shape = [len(texts), 0]
# iterate over the labels (texts)
for (batch_element, text) in enumerate(texts):
label_list = []
for c in text:
character = self.char_list.index(c)
label_list.append(character)
# check label list length and assign it to shape array
if len(label_list) > shape[1]:
shape[1] = len(label_list)
# transform label to tensor
for (i, label) in enumerate(label_list):
|
return (indices, values, shape)
def decode(self, ctc_output, batch_size):
"transform sparse tensor to labels"
encoded_label_list = [] # store batch elements labels
for i in range(batch_size):
encoded_label_list.append([])
blank = len(self.char_list) # last char is a blank
# transform tensor to char indexes
for j in range(batch_size):
for label in ctc_output[j]:
if label == blank:
break
encoded_label_list[j].append(label)
# convert char indexes to words
word_list = []
for label in encoded_label_list:
word = []
for c in label:
char = self.char_list[c]
word.append(char)
word_list.append(str().join(word))
return word_list
def batch_train(self, batch):
n_batch
|
indices.append([batch_element, i])
values.append(label)
|
conditional_block
|
process.py
|
.update_refpix_metadata(ifg_paths, refx, refy, transform, params)
log.debug("refpx, refpy: "+str(refx) + " " + str(refy))
ifg.close()
return int(refx), int(refy)
def _orb_fit_calc(multi_paths: List[MultiplePaths], params, preread_ifgs=None) -> None:
"""
MPI wrapper for orbital fit correction
"""
if not params[cf.ORBITAL_FIT]:
log.info('Orbital correction not required!')
print('Orbital correction not required!')
return
log.info('Calculating orbital correction')
ifg_paths = [p.sampled_path for p in multi_paths]
if preread_ifgs: # don't check except for mpi tests
# perform some general error/sanity checks
log.debug('Checking Orbital error correction status')
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_ORBITAL_ERROR):
log.debug('Orbital error correction not required as all ifgs are already corrected!')
return # return if True condition returned
if params[cf.ORBITAL_FIT_METHOD] == 1:
prcs_ifgs = mpiops.array_split(ifg_paths)
orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)
else:
# Here we do all the multilooking in one process, but in memory
# can use multiple processes if we write data to disc during
# remove_orbital_error step
# A performance comparison should be made for saving multilooked
# files on disc vs in memory single process multilooking
if mpiops.rank == MASTER_PROCESS:
headers = [find_header(p, params) for p in multi_paths]
orbital.remove_orbital_error(ifg_paths, params, headers, preread_ifgs=preread_ifgs)
mpiops.comm.barrier()
log.debug('Finished Orbital error correction')
def _ref_phase_estimation(ifg_paths, params, refpx, refpy):
"""
Wrapper for reference phase estimation.
"""
log.info("Calculating reference phase and correcting each interferogram")
if len(ifg_paths) < 2:
raise rpe.ReferencePhaseError(
"At least two interferograms required for reference phase correction ({len_ifg_paths} "
"provided).".format(len_ifg_paths=len(ifg_paths))
)
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_REF_PHASE):
log.debug('Finished reference phase correction')
return
if params[cf.REF_EST_METHOD] == 1:
ref_phs = rpe.est_ref_phase_method1(ifg_paths, params)
elif params[cf.REF_EST_METHOD] == 2:
ref_phs = rpe.est_ref_phase_method2(ifg_paths, params, refpx, refpy)
else:
raise rpe.ReferencePhaseError("No such option, use '1' or '2'.")
# Save reference phase numpy arrays to disk.
ref_phs_file = os.path.join(params[cf.TMPDIR], 'ref_phs.npy')
if mpiops.rank == MASTER_PROCESS:
collected_ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
process_indices = mpiops.array_split(range(len(ifg_paths)))
collected_ref_phs[process_indices] = ref_phs
for r in range(1, mpiops.size):
process_indices = mpiops.array_split(range(len(ifg_paths)), r)
this_process_ref_phs = np.zeros(shape=len(process_indices),
dtype=np.float64)
mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
collected_ref_phs[process_indices] = this_process_ref_phs
np.save(file=ref_phs_file, arr=collected_ref_phs)
else:
mpiops.comm.Send(ref_phs, dest=MASTER_PROCESS, tag=mpiops.rank)
log.debug('Finished reference phase correction')
# Preserve old return value so tests don't break.
if isinstance(ifg_paths[0], Ifg):
ifgs = ifg_paths
else:
ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
mpiops.comm.barrier()
return ref_phs, ifgs
def main(params):
"""
Top level function to perform PyRate workflow on given interferograms
:return: refpt: tuple of reference pixel x and y position
:rtype: tuple
:return: maxvar: array of maximum variance values of interferograms
:rtype: ndarray
:return: vcmt: Variance-covariance matrix array
:rtype: ndarray
"""
mpi_vs_multiprocess_logging("process", params)
ifg_paths = []
for ifg_path in params[cf.INTERFEROGRAM_FILES]:
ifg_paths.append(ifg_path.sampled_path)
rows, cols = params["rows"], params["cols"]
return process_ifgs(ifg_paths, params, rows, cols)
def process_ifgs(ifg_paths, params, rows, cols):
"""
Top level function to perform PyRate workflow on given interferograms
:param list ifg_paths: List of interferogram paths
:param dict params: Dictionary of configuration parameters
:param int rows: Number of sub-tiles in y direction
:param int cols: Number of sub-tiles in x direction
:return: refpt: tuple of reference pixel x and y position
:rtype: tuple
:return: maxvar: array of maximum variance values of interferograms
:rtype: ndarray
:return: vcmt: Variance-covariance matrix array
:rtype: ndarray
"""
if mpiops.size > 1: # turn off multiprocessing during mpi jobs
params[cf.PARALLEL] = False
outdir = params[cf.TMPDIR]
if not os.path.exists(outdir):
shared.mkdir_p(outdir)
tiles = mpiops.run_once(get_tiles, ifg_paths[0], rows, cols)
preread_ifgs = _create_ifg_dict(ifg_paths, params=params)
# validate user supplied ref pixel
refpixel.validate_supplied_lat_lon(params)
refpx, refpy = _ref_pixel_calc(ifg_paths, params)
# remove non ifg keys
_ = [preread_ifgs.pop(k) for k in ['gt', 'epochlist', 'md', 'wkt']]
multi_paths = params[cf.INTERFEROGRAM_FILES]
_orb_fit_calc(multi_paths, params, preread_ifgs)
_ref_phase_estimation(ifg_paths, params, refpx, refpy)
shared.save_numpy_phase(ifg_paths, tiles, params)
_mst_calc(ifg_paths, params, tiles, preread_ifgs)
# spatio-temporal aps filter
wrap_spatio_temporal_filter(ifg_paths, params, tiles, preread_ifgs)
maxvar, vcmt = _maxvar_vcm_calc(ifg_paths, params, preread_ifgs)
# save phase data tiles as numpy array for timeseries and stackrate calc
shared.save_numpy_phase(ifg_paths, tiles, params)
_timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)
_stack_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)
log.info('PyRate workflow completed')
return (refpx, refpy), maxvar, vcmt
def _stack_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):
"""
MPI wrapper for stacking calculation
"""
process_tiles = mpiops.array_split(tiles)
log.info('Calculating rate map from stacking')
output_dir = params[cf.TMPDIR]
for t in process_tiles:
log.info('Stacking of tile {}'.format(t.index))
ifg_parts = [shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths]
mst_grid_n = np.load(os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))
rate, error, samples = stack.stack_rate_array(ifg_parts, params, vcmt, mst_grid_n)
# declare file names
np.save(file=os.path.join(output_dir, 'stack_rate_{}.npy'.format(t.index)), arr=rate)
np.save(file=os.path.join(output_dir, 'stack_error_{}.npy'.format(t.index)), arr=error)
np.save(file=os.path.join(output_dir, 'stack_samples_{}.npy'.format(t.index)), arr=samples)
mpiops.comm.barrier()
log.debug("Finished stack rate calc!")
def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs):
"""
MPI wrapper for maxvar and vcmt computation
"""
log.info('Calculating the temporal variance-covariance matrix')
process_indices = mpiops.array_split(range(len(ifg_paths)))
def _get_r_dist(ifg_path):
|
"""
Get RDist class object
"""
ifg = Ifg(ifg_path)
ifg.open()
r_dist = vcm_module.RDist(ifg)()
ifg.close()
return r_dist
|
identifier_body
|
|
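The `_ref_phase_estimation` function shown above gathers per-process results on the master rank with raw `Send`/`Recv` calls over index splits. The sketch below shows that same master/worker gather pattern with plain `mpi4py` and NumPy, outside PyRate's `mpiops` wrapper; names, sizes, and the stand-in computation are illustrative, and the script only runs under `mpirun`/`mpiexec`.

```python
# Simplified master/worker gather, mirroring the Send/Recv pattern used in
# _ref_phase_estimation above. Requires mpi4py; launch with mpirun/mpiexec.
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
MASTER = 0

n_items = 12                                          # e.g. number of ifgs (illustrative)
my_indices = np.array_split(np.arange(n_items), size)[rank]
my_values = my_indices.astype(np.float64) * 10.0      # stand-in per-rank computation

if rank == MASTER:
    collected = np.zeros(n_items, dtype=np.float64)
    collected[my_indices] = my_values                 # master's own chunk
    for r in range(1, size):
        r_indices = np.array_split(np.arange(n_items), size)[r]
        buf = np.zeros(len(r_indices), dtype=np.float64)
        comm.Recv(buf, source=r, tag=r)               # blocking receive of rank r's chunk
        collected[r_indices] = buf
    np.save("collected.npy", collected)
else:
    comm.Send(my_values, dest=MASTER, tag=rank)       # send this rank's chunk to master
```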
process.py
|
))
refpixel.update_refpix_metadata(ifg_paths, refx, refy, transform, params)
log.debug("refpx, refpy: "+str(refx) + " " + str(refy))
ifg.close()
return int(refx), int(refy)
def _orb_fit_calc(multi_paths: List[MultiplePaths], params, preread_ifgs=None) -> None:
"""
MPI wrapper for orbital fit correction
"""
if not params[cf.ORBITAL_FIT]:
log.info('Orbital correction not required!')
print('Orbital correction not required!')
return
log.info('Calculating orbital correction')
ifg_paths = [p.sampled_path for p in multi_paths]
if preread_ifgs: # don't check except for mpi tests
# perform some general error/sanity checks
log.debug('Checking Orbital error correction status')
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_ORBITAL_ERROR):
log.debug('Orbital error correction not required as all ifgs are already corrected!')
return # return if True condition returned
if params[cf.ORBITAL_FIT_METHOD] == 1:
prcs_ifgs = mpiops.array_split(ifg_paths)
orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)
else:
# Here we do all the multilooking in one process, but in memory
# can use multiple processes if we write data to disc during
# remove_orbital_error step
# A performance comparison should be made for saving multilooked
# files on disc vs in memory single process multilooking
if mpiops.rank == MASTER_PROCESS:
headers = [find_header(p, params) for p in multi_paths]
orbital.remove_orbital_error(ifg_paths, params, headers, preread_ifgs=preread_ifgs)
mpiops.comm.barrier()
log.debug('Finished Orbital error correction')
def _ref_phase_estimation(ifg_paths, params, refpx, refpy):
"""
Wrapper for reference phase estimation.
"""
log.info("Calculating reference phase and correcting each interferogram")
if len(ifg_paths) < 2:
raise rpe.ReferencePhaseError(
"At least two interferograms required for reference phase correction ({len_ifg_paths} "
"provided).".format(len_ifg_paths=len(ifg_paths))
)
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_REF_PHASE):
log.debug('Finished reference phase correction')
return
if params[cf.REF_EST_METHOD] == 1:
ref_phs = rpe.est_ref_phase_method1(ifg_paths, params)
elif params[cf.REF_EST_METHOD] == 2:
ref_phs = rpe.est_ref_phase_method2(ifg_paths, params, refpx, refpy)
else:
raise rpe.ReferencePhaseError("No such option, use '1' or '2'.")
# Save reference phase numpy arrays to disk.
ref_phs_file = os.path.join(params[cf.TMPDIR], 'ref_phs.npy')
if mpiops.rank == MASTER_PROCESS:
collected_ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
process_indices = mpiops.array_split(range(len(ifg_paths)))
collected_ref_phs[process_indices] = ref_phs
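# gather each worker's slice of reference phase values; the same array_split
# partition is reused to place them back into the full array on the master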
for r in range(1, mpiops.size):
process_indices = mpiops.array_split(range(len(ifg_paths)), r)
this_process_ref_phs = np.zeros(shape=len(process_indices),
dtype=np.float64)
mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
collected_ref_phs[process_indices] = this_process_ref_phs
np.save(file=ref_phs_file, arr=collected_ref_phs)
else:
mpiops.comm.Send(ref_phs, dest=MASTER_PROCESS, tag=mpiops.rank)
log.debug('Finished reference phase correction')
# Preserve old return value so tests don't break.
if isinstance(ifg_paths[0], Ifg):
ifgs = ifg_paths
else:
ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
mpiops.comm.barrier()
return ref_phs, ifgs
def main(params):
"""
Top level function to perform PyRate workflow on given interferograms
:return: refpt: tuple of reference pixel x and y position
:rtype: tuple
:return: maxvar: array of maximum variance values of interferograms
:rtype: ndarray
:return: vcmt: Variance-covariance matrix array
:rtype: ndarray
"""
mpi_vs_multiprocess_logging("process", params)
ifg_paths = []
for ifg_path in params[cf.INTERFEROGRAM_FILES]:
ifg_paths.append(ifg_path.sampled_path)
rows, cols = params["rows"], params["cols"]
return process_ifgs(ifg_paths, params, rows, cols)
def process_ifgs(ifg_paths, params, rows, cols):
"""
Top level function to perform PyRate workflow on given interferograms
:param list ifg_paths: List of interferogram paths
:param dict params: Dictionary of configuration parameters
:param int rows: Number of sub-tiles in y direction
:param int cols: Number of sub-tiles in x direction
:return: refpt: tuple of reference pixel x and y position
:rtype: tuple
:return: maxvar: array of maximum variance values of interferograms
:rtype: ndarray
:return: vcmt: Variance-covariance matrix array
:rtype: ndarray
"""
if mpiops.size > 1: # turn off multiprocessing during MPI jobs
params[cf.PARALLEL] = False
outdir = params[cf.TMPDIR]
if not os.path.exists(outdir):
shared.mkdir_p(outdir)
tiles = mpiops.run_once(get_tiles, ifg_paths[0], rows, cols)
preread_ifgs = _create_ifg_dict(ifg_paths, params=params)
# validate user supplied ref pixel
refpixel.validate_supplied_lat_lon(params)
refpx, refpy = _ref_pixel_calc(ifg_paths, params)
# remove non ifg keys
_ = [preread_ifgs.pop(k) for k in ['gt', 'epochlist', 'md', 'wkt']]
multi_paths = params[cf.INTERFEROGRAM_FILES]
_orb_fit_calc(multi_paths, params, preread_ifgs)
_ref_phase_estimation(ifg_paths, params, refpx, refpy)
shared.save_numpy_phase(ifg_paths, tiles, params)
_mst_calc(ifg_paths, params, tiles, preread_ifgs)
# spatio-temporal aps filter
wrap_spatio_temporal_filter(ifg_paths, params, tiles, preread_ifgs)
maxvar, vcmt = _maxvar_vcm_calc(ifg_paths, params, preread_ifgs)
# save phase data tiles as numpy array for timeseries and stackrate calc
shared.save_numpy_phase(ifg_paths, tiles, params)
_timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)
_stack_calc(ifg_paths, params, vcmt, tiles, preread_ifgs)
log.info('PyRate workflow completed')
return (refpx, refpy), maxvar, vcmt
def _stack_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):
"""
MPI wrapper for stacking calculation
"""
process_tiles = mpiops.array_split(tiles)
log.info('Calculating rate map from stacking')
output_dir = params[cf.TMPDIR]
for t in process_tiles:
log.info('Stacking of tile {}'.format(t.index))
ifg_parts = [shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths]
mst_grid_n = np.load(os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))
rate, error, samples = stack.stack_rate_array(ifg_parts, params, vcmt, mst_grid_n)
# declare file names
np.save(file=os.path.join(output_dir, 'stack_rate_{}.npy'.format(t.index)), arr=rate)
np.save(file=os.path.join(output_dir, 'stack_error_{}.npy'.format(t.index)), arr=error)
np.save(file=os.path.join(output_dir, 'stack_samples_{}.npy'.format(t.index)), arr=samples)
mpiops.comm.barrier()
log.debug("Finished stack rate calc!")
def _maxvar_vcm_calc(ifg_paths, params, preread_ifgs):
"""
MPI wrapper for maxvar and vcmt computation
"""
log.info('Calculating the temporal variance-covariance matrix')
process_indices = mpiops.array_split(range(len(ifg_paths)))
def _get_r_dist(ifg_path):
"""
Get an RDist class object for the given interferogram
"""
ifg = Ifg(ifg_path)
ifg.open()
r_dist = vcm_module.RDist(ifg)()
|
ifg.close()
|
random_line_split
|
|
process.py
|
eread_ifgs.pk')
if mpiops.rank == MASTER_PROCESS:
# add some extra information that's also useful later
gt, md, wkt = shared.get_geotiff_header_info(process_tifs[0])
epochlist = algorithm.get_epochs(ifgs_dict)[0]
log.info('Found {} unique epochs in the {} interferogram network'.format(len(epochlist.dates), nifgs))
ifgs_dict['epochlist'] = epochlist
ifgs_dict['gt'] = gt
ifgs_dict['md'] = md
ifgs_dict['wkt'] = wkt
# dump ifgs_dict file for later use
cp.dump(ifgs_dict, open(preread_ifgs_file, 'wb'))
mpiops.comm.barrier()
preread_ifgs = OrderedDict(sorted(cp.load(open(preread_ifgs_file, 'rb')).items()))
log.debug('Finished converting phase_data to numpy in process {}'.format(mpiops.rank))
return preread_ifgs
def _mst_calc(dest_tifs, params, tiles, preread_ifgs):
"""
MPI wrapper function for MST calculation
"""
process_tiles = mpiops.array_split(tiles)
log.info('Calculating minimum spanning tree matrix')
def
|
(tile, i, preread_ifgs):
"""
Convenience helper: compute the MST matrix for one tile and save it to disk
"""
mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params)
# locally save the mst_mat
mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i))
np.save(file=mst_file_process_n, arr=mst_tile)
for t in process_tiles:
_save_mst_tile(t, t.index, preread_ifgs)
log.debug('Finished mst calculation for process {}'.format(mpiops.rank))
mpiops.comm.barrier()
def _ref_pixel_calc(ifg_paths: List[str], params: dict) -> Tuple[int, int]:
"""
Wrapper for reference pixel calculation
"""
lon = params[cf.REFX]
lat = params[cf.REFY]
ifg = Ifg(ifg_paths[0])
ifg.open(readonly=True)
# assume all interferograms have same projection and will share the same transform
transform = ifg.dataset.GetGeoTransform()
if lon == -1 or lat == -1:
log.info('Searching for best reference pixel location')
half_patch_size, thresh, grid = refpixel.ref_pixel_setup(ifg_paths, params)
process_grid = mpiops.array_split(grid)
refpixel.save_ref_pixel_blocks(process_grid, half_patch_size, ifg_paths, params)
mean_sds = refpixel._ref_pixel_mpi(process_grid, half_patch_size, ifg_paths, thresh, params)
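# gather the per-rank candidate statistics on the master and select the grid
# window with the lowest mean standard deviation as the reference pixel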
mean_sds = mpiops.comm.gather(mean_sds, root=0)
if mpiops.rank == MASTER_PROCESS:
mean_sds = np.hstack(mean_sds)
refpixel_returned = mpiops.run_once(refpixel.find_min_mean, mean_sds, grid)
if isinstance(refpixel_returned, ValueError):
from pyrate.core.refpixel import RefPixelError
raise RefPixelError(
"Reference pixel calculation returned an all nan slice!\n"
"Cannot continue downstream computation. Please change reference pixel algorithm used before "
"continuing.")
refy, refx = refpixel_returned # row first means first value is latitude
log.info('Selected reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))
lon, lat = refpixel.convert_pixel_value_to_geographic_coordinate(refx, refy, transform)
log.info('Selected reference pixel coordinate (lon, lat): ({}, {})'.format(lon, lat))
else:
log.info('Using reference pixel from config file (lon, lat): ({}, {})'.format(lon, lat))
log.warning("Ensure user supplied reference pixel values are in lon/lat")
refx, refy = refpixel.convert_geographic_coordinate_to_pixel_value(lon, lat, transform)
log.info('Converted reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))
refpixel.update_refpix_metadata(ifg_paths, refx, refy, transform, params)
log.debug("refpx, refpy: "+str(refx) + " " + str(refy))
ifg.close()
return int(refx), int(refy)
def _orb_fit_calc(multi_paths: List[MultiplePaths], params, preread_ifgs=None) -> None:
"""
MPI wrapper for orbital fit correction
"""
if not params[cf.ORBITAL_FIT]:
log.info('Orbital correction not required!')
print('Orbital correction not required!')
return
log.info('Calculating orbital correction')
ifg_paths = [p.sampled_path for p in multi_paths]
if preread_ifgs: # don't check except for mpi tests
# perform some general error/sanity checks
log.debug('Checking Orbital error correction status')
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_ORBITAL_ERROR):
log.debug('Orbital error correction not required as all ifgs are already corrected!')
return # all ifgs already corrected, nothing more to do
if params[cf.ORBITAL_FIT_METHOD] == 1:
prcs_ifgs = mpiops.array_split(ifg_paths)
orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)
else:
# Here we do all the multilooking in one process, but in memory
# can use multiple processes if we write data to disc during
# remove_orbital_error step
# A performance comparison should be made for saving multilooked
# files on disc vs in memory single process multilooking
if mpiops.rank == MASTER_PROCESS:
headers = [find_header(p, params) for p in multi_paths]
orbital.remove_orbital_error(ifg_paths, params, headers, preread_ifgs=preread_ifgs)
mpiops.comm.barrier()
log.debug('Finished Orbital error correction')
def _ref_phase_estimation(ifg_paths, params, refpx, refpy):
"""
Wrapper for reference phase estimation.
"""
log.info("Calculating reference phase and correcting each interferogram")
if len(ifg_paths) < 2:
raise rpe.ReferencePhaseError(
"At least two interferograms required for reference phase correction ({len_ifg_paths} "
"provided).".format(len_ifg_paths=len(ifg_paths))
)
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_REF_PHASE):
log.debug('Finished reference phase correction')
return
if params[cf.REF_EST_METHOD] == 1:
ref_phs = rpe.est_ref_phase_method1(ifg_paths, params)
elif params[cf.REF_EST_METHOD] == 2:
ref_phs = rpe.est_ref_phase_method2(ifg_paths, params, refpx, refpy)
else:
raise rpe.ReferencePhaseError("No such option, use '1' or '2'.")
# Save reference phase numpy arrays to disk.
ref_phs_file = os.path.join(params[cf.TMPDIR], 'ref_phs.npy')
if mpiops.rank == MASTER_PROCESS:
collected_ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
process_indices = mpiops.array_split(range(len(ifg_paths)))
collected_ref_phs[process_indices] = ref_phs
for r in range(1, mpiops.size):
process_indices = mpiops.array_split(range(len(ifg_paths)), r)
this_process_ref_phs = np.zeros(shape=len(process_indices),
dtype=np.float64)
mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
collected_ref_phs[process_indices] = this_process_ref_phs
np.save(file=ref_phs_file, arr=collected_ref_phs)
else:
mpiops.comm.Send(ref_phs, dest=MASTER_PROCESS, tag=mpiops.rank)
log.debug('Finished reference phase correction')
# Preserve old return value so tests don't break.
if isinstance(ifg_paths[0], Ifg):
ifgs = ifg_paths
else:
ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
mpiops.comm.barrier()
return ref_phs, ifgs
def main(params):
"""
Top level function to perform PyRate workflow on given interferograms
:return: refpt: tuple of reference pixel x and y position
:rtype: tuple
:return: maxvar: array of maximum variance values of interferograms
:rtype: ndarray
:return: vcmt: Variance-covariance matrix array
:rtype: ndarray
"""
mpi_vs_multiprocess_logging("process", params)
ifg_paths = []
for ifg_path in params[cf.INTERFEROGRAM_FILES]:
ifg_paths.append(ifg_path.sampled_path)
rows, cols = params["rows"], params["cols"]
return process_ifgs(ifg_paths, params, rows, cols)
def process_ifgs(ifg_paths, params, rows,
|
_save_mst_tile
|
identifier_name
|
process.py
|
eread_ifgs.pk')
if mpiops.rank == MASTER_PROCESS:
# add some extra information that's also useful later
gt, md, wkt = shared.get_geotiff_header_info(process_tifs[0])
epochlist = algorithm.get_epochs(ifgs_dict)[0]
log.info('Found {} unique epochs in the {} interferogram network'.format(len(epochlist.dates), nifgs))
ifgs_dict['epochlist'] = epochlist
ifgs_dict['gt'] = gt
ifgs_dict['md'] = md
ifgs_dict['wkt'] = wkt
# dump ifgs_dict file for later use
cp.dump(ifgs_dict, open(preread_ifgs_file, 'wb'))
mpiops.comm.barrier()
preread_ifgs = OrderedDict(sorted(cp.load(open(preread_ifgs_file, 'rb')).items()))
log.debug('Finished converting phase_data to numpy in process {}'.format(mpiops.rank))
return preread_ifgs
def _mst_calc(dest_tifs, params, tiles, preread_ifgs):
"""
MPI wrapper function for MST calculation
"""
process_tiles = mpiops.array_split(tiles)
log.info('Calculating minimum spanning tree matrix')
def _save_mst_tile(tile, i, preread_ifgs):
"""
Convenience helper: compute the MST matrix for one tile and save it to disk
"""
mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params)
# locally save the mst_mat
mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i))
np.save(file=mst_file_process_n, arr=mst_tile)
for t in process_tiles:
_save_mst_tile(t, t.index, preread_ifgs)
log.debug('Finished mst calculation for process {}'.format(mpiops.rank))
mpiops.comm.barrier()
def _ref_pixel_calc(ifg_paths: List[str], params: dict) -> Tuple[int, int]:
"""
Wrapper for reference pixel calculation
"""
lon = params[cf.REFX]
lat = params[cf.REFY]
ifg = Ifg(ifg_paths[0])
ifg.open(readonly=True)
# assume all interferograms have same projection and will share the same transform
transform = ifg.dataset.GetGeoTransform()
if lon == -1 or lat == -1:
log.info('Searching for best reference pixel location')
half_patch_size, thresh, grid = refpixel.ref_pixel_setup(ifg_paths, params)
process_grid = mpiops.array_split(grid)
refpixel.save_ref_pixel_blocks(process_grid, half_patch_size, ifg_paths, params)
mean_sds = refpixel._ref_pixel_mpi(process_grid, half_patch_size, ifg_paths, thresh, params)
mean_sds = mpiops.comm.gather(mean_sds, root=0)
if mpiops.rank == MASTER_PROCESS:
mean_sds = np.hstack(mean_sds)
refpixel_returned = mpiops.run_once(refpixel.find_min_mean, mean_sds, grid)
if isinstance(refpixel_returned, ValueError):
from pyrate.core.refpixel import RefPixelError
raise RefPixelError(
"Reference pixel calculation returned an all nan slice!\n"
"Cannot continue downstream computation. Please change reference pixel algorithm used before "
"continuing.")
refy, refx = refpixel_returned # row first means first value is latitude
log.info('Selected reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))
lon, lat = refpixel.convert_pixel_value_to_geographic_coordinate(refx, refy, transform)
log.info('Selected reference pixel coordinate (lon, lat): ({}, {})'.format(lon, lat))
else:
log.info('Using reference pixel from config file (lon, lat): ({}, {})'.format(lon, lat))
log.warning("Ensure user supplied reference pixel values are in lon/lat")
refx, refy = refpixel.convert_geographic_coordinate_to_pixel_value(lon, lat, transform)
log.info('Converted reference pixel coordinate (x, y): ({}, {})'.format(refx, refy))
refpixel.update_refpix_metadata(ifg_paths, refx, refy, transform, params)
log.debug("refpx, refpy: "+str(refx) + " " + str(refy))
ifg.close()
return int(refx), int(refy)
def _orb_fit_calc(multi_paths: List[MultiplePaths], params, preread_ifgs=None) -> None:
"""
MPI wrapper for orbital fit correction
"""
if not params[cf.ORBITAL_FIT]:
log.info('Orbital correction not required!')
print('Orbital correction not required!')
return
log.info('Calculating orbital correction')
ifg_paths = [p.sampled_path for p in multi_paths]
if preread_ifgs: # don't check except for mpi tests
# perform some general error/sanity checks
log.debug('Checking Orbital error correction status')
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_ORBITAL_ERROR):
log.debug('Orbital error correction not required as all ifgs are already corrected!')
return # all ifgs already corrected, nothing more to do
if params[cf.ORBITAL_FIT_METHOD] == 1:
prcs_ifgs = mpiops.array_split(ifg_paths)
orbital.remove_orbital_error(prcs_ifgs, params, preread_ifgs)
else:
# Here we do all the multilooking in one process, but in memory
# can use multiple processes if we write data to disc during
# remove_orbital_error step
# A performance comparison should be made for saving multilooked
# files on disc vs in memory single process multilooking
if mpiops.rank == MASTER_PROCESS:
headers = [find_header(p, params) for p in multi_paths]
orbital.remove_orbital_error(ifg_paths, params, headers, preread_ifgs=preread_ifgs)
mpiops.comm.barrier()
log.debug('Finished Orbital error correction')
def _ref_phase_estimation(ifg_paths, params, refpx, refpy):
"""
Wrapper for reference phase estimation.
"""
log.info("Calculating reference phase and correcting each interferogram")
if len(ifg_paths) < 2:
raise rpe.ReferencePhaseError(
"At least two interferograms required for reference phase correction ({len_ifg_paths} "
"provided).".format(len_ifg_paths=len(ifg_paths))
)
if mpiops.run_once(shared.check_correction_status, ifg_paths, ifc.PYRATE_REF_PHASE):
log.debug('Finished reference phase correction')
return
if params[cf.REF_EST_METHOD] == 1:
ref_phs = rpe.est_ref_phase_method1(ifg_paths, params)
elif params[cf.REF_EST_METHOD] == 2:
ref_phs = rpe.est_ref_phase_method2(ifg_paths, params, refpx, refpy)
else:
raise rpe.ReferencePhaseError("No such option, use '1' or '2'.")
# Save reference phase numpy arrays to disk.
ref_phs_file = os.path.join(params[cf.TMPDIR], 'ref_phs.npy')
if mpiops.rank == MASTER_PROCESS:
collected_ref_phs = np.zeros(len(ifg_paths), dtype=np.float64)
process_indices = mpiops.array_split(range(len(ifg_paths)))
collected_ref_phs[process_indices] = ref_phs
for r in range(1, mpiops.size):
process_indices = mpiops.array_split(range(len(ifg_paths)), r)
this_process_ref_phs = np.zeros(shape=len(process_indices),
dtype=np.float64)
mpiops.comm.Recv(this_process_ref_phs, source=r, tag=r)
collected_ref_phs[process_indices] = this_process_ref_phs
np.save(file=ref_phs_file, arr=collected_ref_phs)
else:
mpiops.comm.Send(ref_phs, dest=MASTER_PROCESS, tag=mpiops.rank)
log.debug('Finished reference phase correction')
# Preserve old return value so tests don't break.
if isinstance(ifg_paths[0], Ifg):
ifgs = ifg_paths
else:
ifgs = [Ifg(ifg_path) for ifg_path in ifg_paths]
mpiops.comm.barrier()
return ref_phs, ifgs
def main(params):
"""
Top level function to perform PyRate workflow on given interferograms
:return: refpt: tuple of reference pixel x and y position
:rtype: tuple
:return: maxvar: array of maximum variance values of interferograms
:rtype: ndarray
:return: vcmt: Variance-covariance matrix array
:rtype: ndarray
"""
mpi_vs_multiprocess_logging("process", params)
ifg_paths = []
for ifg_path in params[cf.INTERFEROGRAM_FILES]:
|
rows, cols = params["rows"], params["cols"]
return process_ifgs(ifg_paths, params, rows, cols)
def process_ifgs(ifg_paths, params, rows
|
ifg_paths.append(ifg_path.sampled_path)
|
conditional_block
|
corpora.py
|
component from its storage directory
"""
path = os.path.join(self.storagedir, which)
print("Loading from", path)
with open(path, "rb") as handle:
setattr(self, which, _pickle.load(handle))
def load_full(self):
"""
Load the entire corpus from its storage directory
"""
for filename in self.FILENAMES:
self.load(filename)
self.reverse_dicts()
def load_select(self, selected):
"""
Load selected components (from list) from corpus storage directory
"""
for filename in selected:
self.load(filename)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
def get_steps_per_epoch(self, dataset, batchsize):
"""
Returns the number of steps that are necessary to generate all samples exactly once.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
"""
self.load_if_necessary("X")
num_samples = len(self.X[dataset])
if num_samples % batchsize == 0:
return num_samples // batchsize
return num_samples // batchsize + 1 # account for the smaller last batch if necessary
def trim_and_pad_batch(self, batch):
"""
Trim all samples in a batch to MAXLENGTH and pad them to identical lengths.
"""
maxlength = min(self.MAXLENGTH, max([len(x) for x in batch]))
batch = [x[:maxlength] for x in batch]
batch = [np.concatenate([x, np.zeros(maxlength - x.shape[0])]) for x in batch]
return batch
def load_if_necessary(self, which):
"""
Load corpus component only if it has not yet been loaded
"""
if not hasattr(self, which):
self.load(which)
def load_select_if_necessary(self, selected):
"""
Load selected corpus components only if they have not yet been loaded
"""
for which in selected:
self.load_if_necessary(which)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
def get_generator(self, dataset, batchsize, shuffle = False):
"""
Returns a generator that will generate (X,Y) pairs for the given dataset.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
shuffle: if true, the dataset is shuffled at the beginning of every epoch
"""
self.load_select_if_necessary(("X", "Y"))
random_state = np.random.RandomState(0)
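# dedicated RandomState so epoch shuffling is reproducible and independent of the global numpy seed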
while True:
indices = list(range(len(self.X[dataset])))
if shuffle:
random_state.shuffle(indices)
X = [self.X[dataset][idx] for idx in indices]
Y = [self.Y[dataset][idx] for idx in indices]
for idx in range(0, len(X), batchsize):
batch_X = X[idx:min(idx + batchsize, len(X))]
batch_Y = Y[idx:min(idx + batchsize, len(X))]
batch_X = np.array(self.trim_and_pad_batch(batch_X))
yield(batch_X, np.array(batch_Y))
def sanity_check(self):
"""
A number of checks to make sure that data is generated correctly
"""
self.load_full()
generators_not_shuffling = {dataset: self.get_generator(dataset, 16, False) for dataset in self.DATASETS}
generators_shuffling = {dataset: self.get_generator(dataset, 16, True) for dataset in self.DATASETS}
steps_per_epoch = {dataset: self.get_steps_per_epoch(dataset, 16) for dataset in self.DATASETS}
# make sure that non-shuffling generators return data in the same order every epoch
# and that shuffling generators don't
for dataset in self.DATASETS:
print(dataset)
assert len(self.X[dataset]) == len(self.Y[dataset])
for _ in range(50):
x1, y1 = next(generators_not_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_not_shuffling[dataset])
assert np.allclose(x1, x2)
assert np.allclose(y1, y2)
for _ in range(50):
x1, y1 = next(generators_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_shuffling[dataset])
assert x1.shape != x2.shape or not np.allclose(x1, x2)
if dataset != "hybrid":
assert not np.allclose(y1, y2)
# display some data
for k in (6, 77, 99):
for _ in range(k):
x, y = next(generators_shuffling[dataset])
words = [self.rev_worddict[word] for word in x[0] if word > 0]
label = self.rev_classdict[y[0]]
text = " ".join(words)
print(label)
print(text)
print()
print("Hybrid documents")
generator_hybrid = self.get_generator("hybrid", 1)
counter = -1
for k in (55, 66, 999):
for _ in range(k):
x, y = next(generator_hybrid)
counter += 1
words = [self.rev_worddict[word] for word in x[0] if word > 0]
labels = ["(" + self.rev_classdict[label] + ")" for label in self.GT[counter]]
text = " ".join(word + " " + label for word, label in zip(words, labels))
print(text)
print()
def delete_empty_documents(self, dataset):
"""
Delete any documents that do not contain any words (i.e., that were blank-only).
dataset: one of 'train', 'dev', 'test', 'hybrid'
"""
print("Deleting empty documents in", dataset)
number_documents = len(self.raw_documents[dataset])
indices = list(filter(lambda x:len(self.raw_documents[dataset][x].strip()), range(number_documents)))
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
def tokenize_documents(self, dataset):
print("Word-tokenizing documents in", dataset)
self.tokenized_documents[dataset] = [word_tokenize(document) for document in self.raw_documents[dataset]]
def shuffle_dataset(self, dataset):
print("Shuffling dataset", dataset)
indices = list(range(len(self.X[dataset])))
np.random.seed(0)
np.random.shuffle(indices)
self.X[dataset] = [self.X[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
self.tokenized_documents[dataset] = [self.tokenized_documents[dataset][idx] for idx in indices]
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
def make_X(self, dataset):
"""
Create word index arrays from the tokenized documents.
The word index arrays serve as input to training/evaluation/relevance scoring.
"""
print("Making X", dataset)
self.X[dataset] = []
for document in self.tokenized_documents[dataset]:
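# map each token to its vocabulary index, falling back to the __oov__ index for unknown words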
array = np.array([self.worddict.get(word, self.worddict["__oov__"]) for word in document])
self.X[dataset].append(array)
def make_hybrid(self):
"""
Create hybrid documents by:
1) sentence-tokenizing the raw documents in the test set
2) shuffling all sentences
3) re-concatenating the sentences
"""
print("Making hybrid documents")
self.X["hybrid"] = []
self.tokenized_documents["hybrid"] = []
self.GT = []
all_sentences = []
for document, label in zip(self.raw_documents["test"], self.Y["test"]):
sentences = sent_tokenize(document)
for sentence in sentences:
all_sentences.append((sentence, label))
np.random.seed(0)
np.random.shuffle(all_sentences)
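# group the shuffled sentences into hybrid documents of HYBRID_LENGTH sentences,
# keeping each word's source-document label as per-word ground truth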
for i in range(0, len(all_sentences), self.HYBRID_LENGTH):
|
batch = all_sentences[i:min(i+self.HYBRID_LENGTH, len(all_sentences))]
hybrid_tokenized_document = []
hybrid_X = []
hybrid_labels = []
for sentence, label in batch:
for word in word_tokenize(sentence):
hybrid_tokenized_document.append(word)
hybrid_X.append(self.worddict.get(word, self.worddict["__oov__"]))
hybrid_labels.append(label)
self.X["hybrid"].append(np.array(hybrid_X))
self.tokenized_documents["hybrid"].append(hybrid_tokenized_document)
self.GT.append(np.array(hybrid_labels))
|
conditional_block
|
|
corpora.py
|
.EMB_SIZE))
counter = 0
words = []
weights_tmp = []
with open(self.embeddingpath) as handle:
for i, line in enumerate(handle):
tmp = line.strip()
if len(tmp) > 0:
split = tmp.split(" ")
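# keep only vectors for in-vocabulary words with the expected 300 dimensions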
if split[0] in self.worddict and len(split[1:]) == 300:
words.append(split[0])
weights_tmp.append([float(a) for a in split[1:]])
weights_tmp = np.array(weights_tmp)
for word, column in zip(words, weights_tmp):
if self.worddict[word] < self.FREQCAP:
counter += 1
weights[self.worddict[word],:] = column
print("Set", counter, "of", weights.shape[0], "columns")
if self.EMB_SIZE < weights.shape[-1]:
print("Reducing dimensionality to", self.EMB_SIZE)
pca = PCA(self.EMB_SIZE)
weights = pca.fit_transform(weights)
self.embeddings = [weights]
def reverse_dicts(self):
"""
Reverse class and word dicts; important for printing + sanity checks
"""
self.rev_worddict = {self.worddict[word]: word for word in self.worddict}
self.rev_classdict = {self.classdict[cl]: cl for cl in self.classdict}
def store(self):
"""
Store corpus to its storage directory
"""
print("Storing to", self.storagedir)
for filename in self.FILENAMES:
with open(os.path.join(self.storagedir, filename), "wb") as handle:
_pickle.dump(getattr(self, filename), handle)
def load(self, which):
"""
Load a corpus component from its storage directory
"""
path = os.path.join(self.storagedir, which)
print("Loading from", path)
with open(path, "rb") as handle:
setattr(self, which, _pickle.load(handle))
def load_full(self):
"""
Load the entire corpus from its storage directory
"""
for filename in self.FILENAMES:
self.load(filename)
self.reverse_dicts()
def load_select(self, selected):
"""
Load selected components (from list) from corpus storage directory
"""
for filename in selected:
self.load(filename)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
def get_steps_per_epoch(self, dataset, batchsize):
"""
Returns the number of steps that are necessary to generate all samples exactly once.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
"""
self.load_if_necessary("X")
num_samples = len(self.X[dataset])
if num_samples % batchsize == 0:
return num_samples // batchsize
return num_samples // batchsize + 1 # account for the smaller last batch if necessary
def trim_and_pad_batch(self, batch):
"""
Trim all samples in a batch to MAXLENGTH and pad them to identical lengths.
"""
maxlength = min(self.MAXLENGTH, max([len(x) for x in batch]))
batch = [x[:maxlength] for x in batch]
batch = [np.concatenate([x, np.zeros(maxlength - x.shape[0])]) for x in batch]
return batch
def load_if_necessary(self, which):
"""
Load corpus component only if it has not yet been loaded
"""
if not hasattr(self, which):
self.load(which)
def load_select_if_necessary(self, selected):
"""
Load selected corpus components only if they have not yet been loaded
"""
for which in selected:
self.load_if_necessary(which)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
def get_generator(self, dataset, batchsize, shuffle = False):
"""
Returns a generator that will generate (X,Y) pairs for the given dataset.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
shuffle: if true, the dataset is shuffled at the beginning of every epoch
"""
self.load_select_if_necessary(("X", "Y"))
random_state = np.random.RandomState(0)
while True:
indices = list(range(len(self.X[dataset])))
if shuffle:
random_state.shuffle(indices)
X = [self.X[dataset][idx] for idx in indices]
Y = [self.Y[dataset][idx] for idx in indices]
for idx in range(0, len(X), batchsize):
batch_X = X[idx:min(idx + batchsize, len(X))]
batch_Y = Y[idx:min(idx + batchsize, len(X))]
batch_X = np.array(self.trim_and_pad_batch(batch_X))
yield(batch_X, np.array(batch_Y))
def sanity_check(self):
"""
A number of checks to make sure that data is generated correctly
"""
self.load_full()
generators_not_shuffling = {dataset: self.get_generator(dataset, 16, False) for dataset in self.DATASETS}
generators_shuffling = {dataset: self.get_generator(dataset, 16, True) for dataset in self.DATASETS}
steps_per_epoch = {dataset: self.get_steps_per_epoch(dataset, 16) for dataset in self.DATASETS}
# make sure that non-shuffling generators return data in the same order every epoch
# and that shuffling generators don't
for dataset in self.DATASETS:
print(dataset)
assert len(self.X[dataset]) == len(self.Y[dataset])
for _ in range(50):
x1, y1 = next(generators_not_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_not_shuffling[dataset])
assert np.allclose(x1, x2)
assert np.allclose(y1, y2)
for _ in range(50):
x1, y1 = next(generators_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_shuffling[dataset])
assert x1.shape != x2.shape or not np.allclose(x1, x2)
if dataset != "hybrid":
assert not np.allclose(y1, y2)
# display some data
for k in (6, 77, 99):
for _ in range(k):
x, y = next(generators_shuffling[dataset])
words = [self.rev_worddict[word] for word in x[0] if word > 0]
label = self.rev_classdict[y[0]]
text = " ".join(words)
print(label)
print(text)
print()
print("Hybrid documents")
generator_hybrid = self.get_generator("hybrid", 1)
counter = -1
for k in (55, 66, 999):
for _ in range(k):
x, y = next(generator_hybrid)
counter += 1
words = [self.rev_worddict[word] for word in x[0] if word > 0]
labels = ["(" + self.rev_classdict[label] + ")" for label in self.GT[counter]]
text = " ".join(word + " " + label for word, label in zip(words, labels))
print(text)
print()
def delete_empty_documents(self, dataset):
"""
Delete any documents that do not contain any words (i.e., that were blank-only).
dataset: one of 'train', 'dev', 'test', 'hybrid'
"""
print("Deleting empty documents in", dataset)
number_documents = len(self.raw_documents[dataset])
indices = list(filter(lambda x:len(self.raw_documents[dataset][x].strip()), range(number_documents)))
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
def
|
(self, dataset):
print("Word-tokenizing documents in", dataset)
self.tokenized_documents[dataset] = [word_tokenize(document) for document in self.raw_documents[dataset]]
def shuffle_dataset(self, dataset):
print("Shuffling dataset", dataset)
indices = list(range(len(self.X[dataset])))
np.random.seed(0)
np.random.shuffle(indices)
self.X[dataset] = [self.X[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
self.tokenized_documents[dataset] = [self.tokenized_documents[dataset][idx] for idx in indices]
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
def make_X(self, dataset):
"""
Create word
|
tokenize_documents
|
identifier_name
|
corpora.py
|
self.EMB_SIZE))
counter = 0
words = []
weights_tmp = []
with open(self.embeddingpath) as handle:
for i, line in enumerate(handle):
tmp = line.strip()
if len(tmp) > 0:
split = tmp.split(" ")
if split[0] in self.worddict and len(split[1:]) == 300:
words.append(split[0])
weights_tmp.append([float(a) for a in split[1:]])
weights_tmp = np.array(weights_tmp)
for word, column in zip(words, weights_tmp):
if self.worddict[word] < self.FREQCAP:
counter += 1
weights[self.worddict[word],:] = column
print("Set", counter, "of", weights.shape[0], "columns")
if self.EMB_SIZE < weights.shape[-1]:
print("Reducing dimensionality to", self.EMB_SIZE)
pca = PCA(self.EMB_SIZE)
weights = pca.fit_transform(weights)
self.embeddings = [weights]
def reverse_dicts(self):
"""
Reverse class and word dicts; important for printing + sanity checks
"""
self.rev_worddict = {self.worddict[word]: word for word in self.worddict}
self.rev_classdict = {self.classdict[cl]: cl for cl in self.classdict}
def store(self):
"""
|
for filename in self.FILENAMES:
with open(os.path.join(self.storagedir, filename), "wb") as handle:
_pickle.dump(getattr(self, filename), handle)
def load(self, which):
"""
Load a corpus component from its storage directory
"""
path = os.path.join(self.storagedir, which)
print("Loading from", path)
with open(path, "rb") as handle:
setattr(self, which, _pickle.load(handle))
def load_full(self):
"""
Load the entire corpus from its storage directory
"""
for filename in self.FILENAMES:
self.load(filename)
self.reverse_dicts()
def load_select(self, selected):
"""
Load selected components (from list) from corpus storage directory
"""
for filename in selected:
self.load(filename)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
def get_steps_per_epoch(self, dataset, batchsize):
"""
Returns the number of steps that are necessary to generate all samples exactly once.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
"""
self.load_if_necessary("X")
num_samples = len(self.X[dataset])
if num_samples % batchsize == 0:
return num_samples // batchsize
return num_samples // batchsize + 1 # account for the smaller last batch if necessary
def trim_and_pad_batch(self, batch):
"""
Trim all samples in a batch to MAXLENGTH and pad them to identical lengths.
"""
maxlength = min(self.MAXLENGTH, max([len(x) for x in batch]))
batch = [x[:maxlength] for x in batch]
batch = [np.concatenate([x, np.zeros(maxlength - x.shape[0])]) for x in batch]
return batch
def load_if_necessary(self, which):
"""
Load corpus component only if it has not yet been loaded
"""
if not hasattr(self, which):
self.load(which)
def load_select_if_necessary(self, selected):
"""
Load selected corpus components only if they have not yet been loaded
"""
for which in selected:
self.load_if_necessary(which)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
def get_generator(self, dataset, batchsize, shuffle = False):
"""
Returns a generator that will generate (X,Y) pairs for the given dataset.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
shuffle: if true, the dataset is shuffled at the beginning of every epoch
"""
self.load_select_if_necessary(("X", "Y"))
random_state = np.random.RandomState(0)
while True:
indices = list(range(len(self.X[dataset])))
if shuffle:
random_state.shuffle(indices)
X = [self.X[dataset][idx] for idx in indices]
Y = [self.Y[dataset][idx] for idx in indices]
for idx in range(0, len(X), batchsize):
batch_X = X[idx:min(idx + batchsize, len(X))]
batch_Y = Y[idx:min(idx + batchsize, len(X))]
batch_X = np.array(self.trim_and_pad_batch(batch_X))
yield(batch_X, np.array(batch_Y))
def sanity_check(self):
"""
A number of checks to make sure that data is generated correctly
"""
self.load_full()
generators_not_shuffling = {dataset: self.get_generator(dataset, 16, False) for dataset in self.DATASETS}
generators_shuffling = {dataset: self.get_generator(dataset, 16, True) for dataset in self.DATASETS}
steps_per_epoch = {dataset: self.get_steps_per_epoch(dataset, 16) for dataset in self.DATASETS}
# make sure that non-shuffling generators return data in the same order every epoch
# and that shuffling generators don't
for dataset in self.DATASETS:
print(dataset)
assert len(self.X[dataset]) == len(self.Y[dataset])
for _ in range(50):
x1, y1 = next(generators_not_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_not_shuffling[dataset])
assert np.allclose(x1, x2)
assert np.allclose(y1, y2)
for _ in range(50):
x1, y1 = next(generators_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_shuffling[dataset])
assert x1.shape != x2.shape or not np.allclose(x1, x2)
if dataset != "hybrid":
assert not np.allclose(y1, y2)
# display some data
for k in (6, 77, 99):
for _ in range(k):
x, y = next(generators_shuffling[dataset])
words = [self.rev_worddict[word] for word in x[0] if word > 0]
label = self.rev_classdict[y[0]]
text = " ".join(words)
print(label)
print(text)
print()
print("Hybrid documents")
generator_hybrid = self.get_generator("hybrid", 1)
counter = -1
for k in (55, 66, 999):
for _ in range(k):
x, y = next(generator_hybrid)
counter += 1
words = [self.rev_worddict[word] for word in x[0] if word > 0]
labels = ["(" + self.rev_classdict[label] + ")" for label in self.GT[counter]]
text = " ".join(word + " " + label for word, label in zip(words, labels))
print(text)
print()
def delete_empty_documents(self, dataset):
"""
Delete any documents that do not contain any words (i.e., that were blank-only).
dataset: one of 'train', 'dev', 'test', 'hybrid'
"""
print("Deleting empty documents in", dataset)
number_documents = len(self.raw_documents[dataset])
indices = list(filter(lambda x:len(self.raw_documents[dataset][x].strip()), range(number_documents)))
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
def tokenize_documents(self, dataset):
print("Word-tokenizing documents in", dataset)
self.tokenized_documents[dataset] = [word_tokenize(document) for document in self.raw_documents[dataset]]
def shuffle_dataset(self, dataset):
print("Shuffling dataset", dataset)
indices = list(range(len(self.X[dataset])))
np.random.seed(0)
np.random.shuffle(indices)
self.X[dataset] = [self.X[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
self.tokenized_documents[dataset] = [self.tokenized_documents[dataset][idx] for idx in indices]
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
def make_X(self, dataset):
"""
Create word index
|
Store corpus to its storage directory
"""
print("Storing to", self.storagedir)
|
random_line_split
|
corpora.py
|
self.EMB_SIZE))
counter = 0
words = []
weights_tmp = []
with open(self.embeddingpath) as handle:
for i, line in enumerate(handle):
tmp = line.strip()
if len(tmp) > 0:
split = tmp.split(" ")
if split[0] in self.worddict and len(split[1:]) == 300:
words.append(split[0])
weights_tmp.append([float(a) for a in split[1:]])
weights_tmp = np.array(weights_tmp)
for word, column in zip(words, weights_tmp):
if self.worddict[word] < self.FREQCAP:
counter += 1
weights[self.worddict[word],:] = column
print("Set", counter, "of", weights.shape[0], "columns")
if self.EMB_SIZE < weights.shape[-1]:
print("Reducing dimensionality to", self.EMB_SIZE)
pca = PCA(self.EMB_SIZE)
weights = pca.fit_transform(weights)
self.embeddings = [weights]
def reverse_dicts(self):
"""
Reverse class and word dicts; important for printing + sanity checks
"""
self.rev_worddict = {self.worddict[word]: word for word in self.worddict}
self.rev_classdict = {self.classdict[cl]: cl for cl in self.classdict}
def store(self):
"""
Store corpus to its storage directory
"""
print("Storing to", self.storagedir)
for filename in self.FILENAMES:
with open(os.path.join(self.storagedir, filename), "wb") as handle:
_pickle.dump(getattr(self, filename), handle)
def load(self, which):
"""
Load a corpus component from its storage directory
"""
path = os.path.join(self.storagedir, which)
print("Loading from", path)
with open(path, "rb") as handle:
setattr(self, which, _pickle.load(handle))
def load_full(self):
"""
Load the entire corpus from its storage directory
"""
for filename in self.FILENAMES:
self.load(filename)
self.reverse_dicts()
def load_select(self, selected):
"""
Load selected components (from list) from corpus storage directory
"""
for filename in selected:
self.load(filename)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
def get_steps_per_epoch(self, dataset, batchsize):
"""
Returns the number of steps that are necessary to generate all samples exactly once.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
"""
self.load_if_necessary("X")
num_samples = len(self.X[dataset])
if num_samples % batchsize == 0:
return num_samples // batchsize
return num_samples // batchsize + 1 # account for the smaller last batch if necessary
def trim_and_pad_batch(self, batch):
"""
Trim all samples in a batch to MAXLENGTH and pad them to identical lengths.
"""
maxlength = min(self.MAXLENGTH, max([len(x) for x in batch]))
batch = [x[:maxlength] for x in batch]
batch = [np.concatenate([x, np.zeros(maxlength - x.shape[0])]) for x in batch]
return batch
def load_if_necessary(self, which):
"""
Load corpus component only if it has not yet been loaded
"""
if not hasattr(self, which):
self.load(which)
def load_select_if_necessary(self, selected):
|
def get_generator(self, dataset, batchsize, shuffle = False):
"""
Returns a generator that will generate (X,Y) pairs for the given dataset.
dataset: one of 'train', 'dev', 'test', 'hybrid'
batchsize: batch size that the generator will be working on
shuffle: if true, the dataset is shuffled at the beginning of every epoch
"""
self.load_select_if_necessary(("X", "Y"))
random_state = np.random.RandomState(0)
while True:
indices = list(range(len(self.X[dataset])))
if shuffle:
random_state.shuffle(indices)
X = [self.X[dataset][idx] for idx in indices]
Y = [self.Y[dataset][idx] for idx in indices]
for idx in range(0, len(X), batchsize):
batch_X = X[idx:min(idx + batchsize, len(X))]
batch_Y = Y[idx:min(idx + batchsize, len(X))]
batch_X = np.array(self.trim_and_pad_batch(batch_X))
yield(batch_X, np.array(batch_Y))
def sanity_check(self):
"""
A number of checks to make sure that data is generated correctly
"""
self.load_full()
generators_not_shuffling = {dataset: self.get_generator(dataset, 16, False) for dataset in self.DATASETS}
generators_shuffling = {dataset: self.get_generator(dataset, 16, True) for dataset in self.DATASETS}
steps_per_epoch = {dataset: self.get_steps_per_epoch(dataset, 16) for dataset in self.DATASETS}
# make sure that non-shuffling generators return data in the same order every epoch
# and that shuffling generators don't
for dataset in self.DATASETS:
print(dataset)
assert len(self.X[dataset]) == len(self.Y[dataset])
for _ in range(50):
x1, y1 = next(generators_not_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_not_shuffling[dataset])
assert np.allclose(x1, x2)
assert np.allclose(y1, y2)
for _ in range(50):
x1, y1 = next(generators_shuffling[dataset])
for _ in range(steps_per_epoch[dataset]):
x2, y2 = next(generators_shuffling[dataset])
assert x1.shape != x2.shape or not np.allclose(x1, x2)
if dataset != "hybrid":
assert not np.allclose(y1, y2)
# display some data
for k in (6, 77, 99):
for _ in range(k):
x, y = next(generators_shuffling[dataset])
words = [self.rev_worddict[word] for word in x[0] if word > 0]
label = self.rev_classdict[y[0]]
text = " ".join(words)
print(label)
print(text)
print()
print("Hybrid documents")
generator_hybrid = self.get_generator("hybrid", 1)
counter = -1
for k in (55, 66, 999):
for _ in range(k):
x, y = next(generator_hybrid)
counter += 1
words = [self.rev_worddict[word] for word in x[0] if word > 0]
labels = ["(" + self.rev_classdict[label] + ")" for label in self.GT[counter]]
text = " ".join(word + " " + label for word, label in zip(words, labels))
print(text)
print()
def delete_empty_documents(self, dataset):
"""
Delete any documents that do not contain any words (i.e., that were blank-only).
dataset: one of 'train', 'dev', 'test', 'hybrid'
"""
print("Deleting empty documents in", dataset)
number_documents = len(self.raw_documents[dataset])
indices = list(filter(lambda x:len(self.raw_documents[dataset][x].strip()), range(number_documents)))
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
def tokenize_documents(self, dataset):
print("Word-tokenizing documents in", dataset)
self.tokenized_documents[dataset] = [word_tokenize(document) for document in self.raw_documents[dataset]]
def shuffle_dataset(self, dataset):
print("Shuffling dataset", dataset)
indices = list(range(len(self.X[dataset])))
np.random.seed(0)
np.random.shuffle(indices)
self.X[dataset] = [self.X[dataset][idx] for idx in indices]
self.Y[dataset] = [self.Y[dataset][idx] for idx in indices]
self.tokenized_documents[dataset] = [self.tokenized_documents[dataset][idx] for idx in indices]
self.raw_documents[dataset] = [self.raw_documents[dataset][idx] for idx in indices]
def make_X(self, dataset):
"""
Create word index
|
"""
Load selected corpus components only if they have not yet been loaded
"""
for which in selected:
self.load_if_necessary(which)
if "worddict" in selected and "classdict" in selected:
self.reverse_dicts()
|
identifier_body
|
trainer.py
|
Denormalize
import cv2
import numpy as np
from utils.gradcam import *
class Trainer(nn.Module):
def __init__(self, config, model, train_loader, val_loader, **kwargs):
super().__init__()
self.config = config
self.model = model
self.train_loader = train_loader
self.val_loader = val_loader
self.optimizer = model.optimizer
self.criterion = model.criterion
self.metrics = model.metrics # list of classification metrics
self.set_attribute(kwargs)
def logging(self, logs):
tags = [tag for tag in logs.keys()]
values = [value for value in logs.values()]
self.logger.write(tags=tags, values=values)
def fit(self, start_epoch=0, start_iter=0, num_epochs=10, print_per_iter=None):
self.num_epochs = num_epochs
self.num_iters = num_epochs * len(self.train_loader)
if self.checkpoint is None:
self.checkpoint = CheckPoint(save_per_epoch=int(num_epochs/10) + 1)
if print_per_iter is not None:
self.print_per_iter = print_per_iter
else:
self.print_per_iter = int(len(self.train_loader) / 10)
self.epoch = start_epoch
# For one-cycle lr only
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.last_epoch = start_epoch - 1
self.start_iter = start_iter % len(self.train_loader)
print(f'===========================START TRAINING=================================')
print(f'Training for {num_epochs} epochs ...')
for epoch in range(self.epoch, self.num_epochs):
try:
self.epoch = epoch
self.train_per_epoch()
if self.num_evaluate_per_epoch != 0:
if epoch % self.num_evaluate_per_epoch == 0 and epoch+1 >= self.num_evaluate_per_epoch:
self.evaluate_per_epoch()
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.step()
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Epoch': lr}
self.logging(log_dict)
except KeyboardInterrupt:
self.checkpoint.save(self.model, save_mode='last', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
print("Stop training, checkpoint saved...")
break
print("Training Completed!")
def train_per_epoch(self):
self.model.train()
running_loss = {}
running_time = 0
loop = tqdm(self.train_loader)
for i, batch in enumerate(loop):
start_time = time.time()
with torch.cuda.amp.autocast():
loss, loss_dict = self.model.training_step(batch)
if self.use_accumulate:
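# scale the loss so gradients summed over accumulate_steps mini-batches
# approximate a single large-batch update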
loss /= self.accumulate_steps
self.model.scaler(loss, self.optimizer)
if self.use_accumulate:
if (i+1) % self.accumulate_steps == 0 or i == len(self.train_loader)-1:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
else:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
# self.scheduler.step()
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
torch.cuda.synchronize()
end_time = time.time()
for (key, value) in loss_dict.items():
if key in running_loss.keys():
running_loss[key] += value
else:
running_loss[key] = value
running_time += end_time-start_time
self.iters = self.start_iter + \
len(self.train_loader)*self.epoch + i + 1
if self.iters % self.print_per_iter == 0:
for key in running_loss.keys():
running_loss[key] /= self.print_per_iter
running_loss[key] = np.round(running_loss[key], 5)
loss_string = '{}'.format(running_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print("[{}|{}] [{}|{}] || {} || Time: {:10.4f}s".format(
self.epoch, self.num_epochs, self.iters, self.num_iters, loss_string, running_time))
self.logging(
{"Training Loss/Batch": running_loss['T'] / self.print_per_iter, })
running_loss = {}
running_time = 0
if (self.iters % self.checkpoint.save_per_iter == 0 or self.iters == self.num_iters - 1):
print(f'Save model at [{self.epoch}|{self.iters}] to last.pth')
self.checkpoint.save(
self.model,
save_mode='last',
epoch=self.epoch,
iters=self.iters,
best_value=self.best_value)
def evaluate_per_epoch(self):
self.model.eval()
epoch_loss = {}
metric_dict = {}
print('=============================EVALUATION===================================')
start_time = time.time()
with torch.no_grad():
for batch in tqdm(self.val_loader):
_, loss_dict = self.model.evaluate_step(batch)
for (key, val) in loss_dict.items():
if key in epoch_loss.keys():
epoch_loss[key] += val
else:
epoch_loss[key] = val
end_time = time.time()
running_time = end_time - start_time
metric_dict = self.model.get_metric_values()
self.model.reset_metrics()
for key in epoch_loss.keys():
epoch_loss[key] /= len(self.val_loader)
epoch_loss[key] = np.round(epoch_loss[key], 5)
loss_string = '{}'.format(epoch_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print()
print("[{}|{}] || {} || Time: {:10.4f} s".format(
self.epoch, self.num_epochs, loss_string, running_time))
for metric, score in metric_dict.items():
print(metric + ': ' + str(score), end=' | ')
print()
print('==========================================================================')
log_dict = {
"Validation Loss/Epoch": epoch_loss['T'] / len(self.val_loader), }
log_dict.update(metric_dict)
self.logging(log_dict)
# Save the model that achieves the best accuracy ('acc') score
if metric_dict['acc'] > self.best_value:
self.best_value = metric_dict['acc']
self.checkpoint.save(self.model, save_mode='best', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
if self.visualize_when_val:
self.visualize_batch()
def visualize_batch(self):
# Visualize gradient-weighted class activation mapping (Grad-CAM)
|
grayscale_cam, label_idx = grad_cam(inputs, target_category)
label = self.cfg.obj_list[label_idx]
img_cam = show_cam_on_image(img_show, grayscale_cam, label)
cv2.imwrite(image_outname, img_cam)
def __str__(self) -> str:
title = '------------- Model Summary ---------------\n'
name = f'Name: {self.model.name}\n'
params = f'Number of params: {self.model.trainable_parameters}\n'
train_iter_per_epoch = f'Number of train iterations per epoch: {len(self.train_loader)}\n'
val_iter_per_epoch = f'Number of val iterations per epoch: {len(self.val_loader)}'
return title + name + params + train_iter_per_epoch + val_iter_per_epoch
def print_forward_step(self):
self.model.eval()
outputs = self.model.forward_step()
print('Feedforward: output_shape: ', outputs.shape)
def set_accumulate_step(self):
self.use_accumulate = False
if self.config.total_accumulate_steps > 0:
self.use_accumulate = True
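# accumulate roughly total_accumulate_steps / batch_size mini-batches so the
# effective batch size matches total_accumulate_steps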
self.accumulate_steps = max(
round(self.config.total_accumulate_steps / self.config.batch_size), 1)
def set_amp(self):
self.use_amp = False
if self.config.mixed_precision:
self.use_amp
|
if not os.path.exists('./samples'):
os.mkdir('./samples')
denom = Denormalize()
batch = next(iter(self.val_loader))
images = batch["imgs"]
#targets = batch["targets"]
self.model.eval()
config_name = self.cfg.model_name.split('_')[0]
grad_cam = GradCam(model=self.model.model, config_name=config_name)
for idx, inputs in enumerate(images):
image_outname = os.path.join(
'samples', f'{self.epoch}_{self.iters}_{idx}.jpg')
img_show = denom(inputs)
inputs = inputs.unsqueeze(0)
inputs = inputs.to(self.model.device)
target_category = None
|
identifier_body
|
trainer.py
|
Denormalize
import cv2
import numpy as np
from utils.gradcam import *
class Trainer(nn.Module):
def __init__(self, config, model, train_loader, val_loader, **kwargs):
super().__init__()
self.config = config
self.model = model
self.train_loader = train_loader
self.val_loader = val_loader
self.optimizer = model.optimizer
self.criterion = model.criterion
self.metrics = model.metrics # list of classification metrics
self.set_attribute(kwargs)
def logging(self, logs):
tags = [tag for tag in logs.keys()]
values = [value for value in logs.values()]
self.logger.write(tags=tags, values=values)
def fit(self, start_epoch=0, start_iter=0, num_epochs=10, print_per_iter=None):
self.num_epochs = num_epochs
self.num_iters = num_epochs * len(self.train_loader)
if self.checkpoint is None:
|
if print_per_iter is not None:
self.print_per_iter = print_per_iter
else:
self.print_per_iter = int(len(self.train_loader) / 10)
self.epoch = start_epoch
# For one-cycle lr only
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.last_epoch = start_epoch - 1
self.start_iter = start_iter % len(self.train_loader)
print(f'===========================START TRAINING=================================')
print(f'Training for {num_epochs} epochs ...')
for epoch in range(self.epoch, self.num_epochs):
try:
self.epoch = epoch
self.train_per_epoch()
if self.num_evaluate_per_epoch != 0:
if epoch % self.num_evaluate_per_epoch == 0 and epoch+1 >= self.num_evaluate_per_epoch:
self.evaluate_per_epoch()
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.step()
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Epoch': lr}
                    self.logging(log_dict)
except KeyboardInterrupt:
self.checkpoint.save(self.model, save_mode='last', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
print("Stop training, checkpoint saved...")
break
print("Training Completed!")
def train_per_epoch(self):
self.model.train()
        running_loss = {}  # accumulate per-key loss values as a dict
running_time = 0
loop = tqdm(self.train_loader)
for i, batch in enumerate(loop):
start_time = time.time()
with torch.cuda.amp.autocast():
loss, loss_dict = self.model.training_step(batch)
if self.use_accumulate:
loss /= self.accumulate_steps
self.model.scaler(loss, self.optimizer)
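            # With gradient accumulation enabled, the optimizer only steps every
            # `accumulate_steps` mini-batches (or on the final batch of the epoch),
            # so gradients from several batches are summed before each update.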
if self.use_accumulate:
if (i+1) % self.accumulate_steps == 0 or i == len(self.train_loader)-1:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
else:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
# self.scheduler.step()
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
torch.cuda.synchronize()
end_time = time.time()
for (key, value) in loss_dict.items():
if key in running_loss.keys():
running_loss[key] += value
else:
running_loss[key] = value
running_time += end_time-start_time
self.iters = self.start_iter + \
len(self.train_loader)*self.epoch + i + 1
if self.iters % self.print_per_iter == 0:
for key in running_loss.keys():
running_loss[key] /= self.print_per_iter
running_loss[key] = np.round(running_loss[key], 5)
loss_string = '{}'.format(running_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print("[{}|{}] [{}|{}] || {} || Time: {:10.4f}s".format(
self.epoch, self.num_epochs, self.iters, self.num_iters, loss_string, running_time))
                # running_loss has already been averaged over print_per_iter above
                self.logging({"Training Loss/Batch": running_loss['T']})
running_loss = {}
running_time = 0
if (self.iters % self.checkpoint.save_per_iter == 0 or self.iters == self.num_iters - 1):
print(f'Save model at [{self.epoch}|{self.iters}] to last.pth')
self.checkpoint.save(
self.model,
save_mode='last',
epoch=self.epoch,
iters=self.iters,
best_value=self.best_value)
def evaluate_per_epoch(self):
self.model.eval()
epoch_loss = {}
metric_dict = {}
print('=============================EVALUATION===================================')
start_time = time.time()
with torch.no_grad():
for batch in tqdm(self.val_loader):
_, loss_dict = self.model.evaluate_step(batch)
for (key, val) in loss_dict.items():
if key in epoch_loss.keys():
epoch_loss[key] += val
else:
epoch_loss[key] = val
end_time = time.time()
running_time = end_time - start_time
metric_dict = self.model.get_metric_values()
self.model.reset_metrics()
for key in epoch_loss.keys():
epoch_loss[key] /= len(self.val_loader)
epoch_loss[key] = np.round(epoch_loss[key], 5)
loss_string = '{}'.format(epoch_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print()
print("[{}|{}] || {} || Time: {:10.4f} s".format(
self.epoch, self.num_epochs, loss_string, running_time))
for metric, score in metric_dict.items():
print(metric + ': ' + str(score), end=' | ')
print()
print('==========================================================================')
        # epoch_loss has already been averaged over the validation loader above
        log_dict = {"Validation Loss/Epoch": epoch_loss['T']}
log_dict.update(metric_dict)
self.logging(log_dict)
        # Save the model that achieves the best accuracy score so far
if metric_dict['acc'] > self.best_value:
self.best_value = metric_dict['acc']
self.checkpoint.save(self.model, save_mode='best', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
if self.visualize_when_val:
self.visualize_batch()
def visualize_batch(self):
        # Visualize Grad-CAM (Gradient-weighted Class Activation Mapping)
if not os.path.exists('./samples'):
os.mkdir('./samples')
denom = Denormalize()
batch = next(iter(self.val_loader))
images = batch["imgs"]
#targets = batch["targets"]
self.model.eval()
config_name = self.cfg.model_name.split('_')[0]
grad_cam = GradCam(model=self.model.model, config_name=config_name)
for idx, inputs in enumerate(images):
image_outname = os.path.join(
'samples', f'{self.epoch}_{self.iters}_{idx}.jpg')
img_show = denom(inputs)
inputs = inputs.unsqueeze(0)
inputs = inputs.to(self.model.device)
target_category = None
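            # target_category=None lets Grad-CAM pick the model's top-scoring class;
            # the returned heatmap is then overlaid on the denormalized input image.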
grayscale_cam, label_idx = grad_cam(inputs, target_category)
label = self.cfg.obj_list[label_idx]
img_cam = show_cam_on_image(img_show, grayscale_cam, label)
cv2.imwrite(image_outname, img_cam)
def __str__(self) -> str:
title = '------------- Model Summary ---------------\n'
name = f'Name: {self.model.name}\n'
params = f'Number of params: {self.model.trainable_parameters}\n'
train_iter_per_epoch = f'Number of train iterations per epoch: {len(self.train_loader)}\n'
val_iter_per_epoch = f'Number of val iterations per epoch: {len(self.val_loader)}'
return title + name + params + train_iter_per_epoch + val_iter_per_epoch
def print_forward_step(self):
self.model.eval()
outputs = self.model.forward_step()
print('Feedforward: output_shape: ', outputs.shape)
def set_accumulate_step(self):
self.use_accumulate = False
if self.config.total_accumulate_steps > 0:
self.use_accumulate = True
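            # Accumulate enough mini-batches that the effective batch size
            # (accumulate_steps * batch_size) roughly matches total_accumulate_steps.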
self.accumulate_steps = max(
round(self.config.total_accumulate_steps / self.config.batch_size), 1)
def set_amp(self):
self.use_amp = False
if self.config.mixed_precision:
self
|
self.checkpoint = CheckPoint(save_per_epoch=int(num_epochs/10) + 1)
|
conditional_block
|
trainer.py
|
Denormalize
import cv2
import numpy as np
from utils.gradcam import *
class Trainer(nn.Module):
def __init__(self, config, model, train_loader, val_loader, **kwargs):
super().__init__()
self.config = config
self.model = model
self.train_loader = train_loader
self.val_loader = val_loader
self.optimizer = model.optimizer
self.criterion = model.criterion
self.metrics = model.metrics # list of classification metrics
self.set_attribute(kwargs)
    def logging(self, logs):
tags = [tag for tag in logs.keys()]
values = [value for value in logs.values()]
self.logger.write(tags=tags, values=values)
def fit(self, start_epoch=0, start_iter=0, num_epochs=10, print_per_iter=None):
self.num_epochs = num_epochs
self.num_iters = num_epochs * len(self.train_loader)
if self.checkpoint is None:
self.checkpoint = CheckPoint(save_per_epoch=int(num_epochs/10) + 1)
if print_per_iter is not None:
self.print_per_iter = print_per_iter
else:
self.print_per_iter = int(len(self.train_loader) / 10)
self.epoch = start_epoch
# For one-cycle lr only
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.last_epoch = start_epoch - 1
self.start_iter = start_iter % len(self.train_loader)
print(f'===========================START TRAINING=================================')
print(f'Training for {num_epochs} epochs ...')
for epoch in range(self.epoch, self.num_epochs):
try:
self.epoch = epoch
self.train_per_epoch()
if self.num_evaluate_per_epoch != 0:
if epoch % self.num_evaluate_per_epoch == 0 and epoch+1 >= self.num_evaluate_per_epoch:
self.evaluate_per_epoch()
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.step()
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Epoch': lr}
                    self.logging(log_dict)
except KeyboardInterrupt:
self.checkpoint.save(self.model, save_mode='last', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
print("Stop training, checkpoint saved...")
break
print("Training Completed!")
def train_per_epoch(self):
self.model.train()
        running_loss = {}  # accumulate per-key loss values as a dict
running_time = 0
loop = tqdm(self.train_loader)
for i, batch in enumerate(loop):
start_time = time.time()
with torch.cuda.amp.autocast():
loss, loss_dict = self.model.training_step(batch)
if self.use_accumulate:
loss /= self.accumulate_steps
self.model.scaler(loss, self.optimizer)
if self.use_accumulate:
if (i+1) % self.accumulate_steps == 0 or i == len(self.train_loader)-1:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
else:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
|
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
torch.cuda.synchronize()
end_time = time.time()
for (key, value) in loss_dict.items():
if key in running_loss.keys():
running_loss[key] += value
else:
running_loss[key] = value
running_time += end_time-start_time
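            # Global iteration counter: resume offset + iterations from completed
            # epochs + position within the current epoch.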
self.iters = self.start_iter + \
len(self.train_loader)*self.epoch + i + 1
if self.iters % self.print_per_iter == 0:
for key in running_loss.keys():
running_loss[key] /= self.print_per_iter
running_loss[key] = np.round(running_loss[key], 5)
loss_string = '{}'.format(running_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print("[{}|{}] [{}|{}] || {} || Time: {:10.4f}s".format(
self.epoch, self.num_epochs, self.iters, self.num_iters, loss_string, running_time))
                # running_loss has already been averaged over print_per_iter above
                self.logging({"Training Loss/Batch": running_loss['T']})
running_loss = {}
running_time = 0
if (self.iters % self.checkpoint.save_per_iter == 0 or self.iters == self.num_iters - 1):
print(f'Save model at [{self.epoch}|{self.iters}] to last.pth')
self.checkpoint.save(
self.model,
save_mode='last',
epoch=self.epoch,
iters=self.iters,
best_value=self.best_value)
def evaluate_per_epoch(self):
self.model.eval()
epoch_loss = {}
metric_dict = {}
print('=============================EVALUATION===================================')
start_time = time.time()
with torch.no_grad():
for batch in tqdm(self.val_loader):
_, loss_dict = self.model.evaluate_step(batch)
for (key, val) in loss_dict.items():
if key in epoch_loss.keys():
epoch_loss[key] += val
else:
epoch_loss[key] = val
end_time = time.time()
running_time = end_time - start_time
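        # Classification metrics are accumulated inside the model over the whole
        # validation set; read them once per epoch, then reset for the next run.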
metric_dict = self.model.get_metric_values()
self.model.reset_metrics()
for key in epoch_loss.keys():
epoch_loss[key] /= len(self.val_loader)
epoch_loss[key] = np.round(epoch_loss[key], 5)
loss_string = '{}'.format(epoch_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print()
print("[{}|{}] || {} || Time: {:10.4f} s".format(
self.epoch, self.num_epochs, loss_string, running_time))
for metric, score in metric_dict.items():
print(metric + ': ' + str(score), end=' | ')
print()
print('==========================================================================')
        # epoch_loss has already been averaged over the validation loader above
        log_dict = {"Validation Loss/Epoch": epoch_loss['T']}
log_dict.update(metric_dict)
self.logging(log_dict)
        # Save the model that achieves the best accuracy score so far
if metric_dict['acc'] > self.best_value:
self.best_value = metric_dict['acc']
self.checkpoint.save(self.model, save_mode='best', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
if self.visualize_when_val:
self.visualize_batch()
def visualize_batch(self):
        # Visualize Grad-CAM (Gradient-weighted Class Activation Mapping)
if not os.path.exists('./samples'):
os.mkdir('./samples')
denom = Denormalize()
batch = next(iter(self.val_loader))
images = batch["imgs"]
#targets = batch["targets"]
self.model.eval()
config_name = self.cfg.model_name.split('_')[0]
grad_cam = GradCam(model=self.model.model, config_name=config_name)
for idx, inputs in enumerate(images):
image_outname = os.path.join(
'samples', f'{self.epoch}_{self.iters}_{idx}.jpg')
img_show = denom(inputs)
inputs = inputs.unsqueeze(0)
inputs = inputs.to(self.model.device)
target_category = None
grayscale_cam, label_idx = grad_cam(inputs, target_category)
label = self.cfg.obj_list[label_idx]
img_cam = show_cam_on_image(img_show, grayscale_cam, label)
cv2.imwrite(image_outname, img_cam)
def __str__(self) -> str:
title = '------------- Model Summary ---------------\n'
name = f'Name: {self.model.name}\n'
params = f'Number of params: {self.model.trainable_parameters}\n'
train_iter_per_epoch = f'Number of train iterations per epoch: {len(self.train_loader)}\n'
val_iter_per_epoch = f'Number of val iterations per epoch: {len(self.val_loader)}'
return title + name + params + train_iter_per_epoch + val_iter_per_epoch
def print_forward_step(self):
self.model.eval()
outputs = self.model.forward_step()
print('Feedforward: output_shape: ', outputs.shape)
def set_accumulate_step(self):
self.use_accumulate = False
if self.config.total_accumulate_steps > 0:
self.use_accumulate = True
self.accumulate_steps = max(
round(self.config.total_accumulate_steps / self.config.batch_size), 1)
def set_amp(self):
self.use_amp = False
if self.config.mixed_precision:
self.use
|
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
# self.scheduler.step()
self.scheduler.step(
|
random_line_split
|
trainer.py
|
Denormalize
import cv2
import numpy as np
from utils.gradcam import *
class Trainer(nn.Module):
def
|
(self, config, model, train_loader, val_loader, **kwargs):
super().__init__()
self.config = config
self.model = model
self.train_loader = train_loader
self.val_loader = val_loader
self.optimizer = model.optimizer
self.criterion = model.criterion
self.metrics = model.metrics # list of classification metrics
self.set_attribute(kwargs)
    def logging(self, logs):
tags = [tag for tag in logs.keys()]
values = [value for value in logs.values()]
self.logger.write(tags=tags, values=values)
def fit(self, start_epoch=0, start_iter=0, num_epochs=10, print_per_iter=None):
self.num_epochs = num_epochs
self.num_iters = num_epochs * len(self.train_loader)
if self.checkpoint is None:
self.checkpoint = CheckPoint(save_per_epoch=int(num_epochs/10) + 1)
if print_per_iter is not None:
self.print_per_iter = print_per_iter
else:
self.print_per_iter = int(len(self.train_loader) / 10)
self.epoch = start_epoch
# For one-cycle lr only
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.last_epoch = start_epoch - 1
self.start_iter = start_iter % len(self.train_loader)
print(f'===========================START TRAINING=================================')
print(f'Training for {num_epochs} epochs ...')
for epoch in range(self.epoch, self.num_epochs):
try:
self.epoch = epoch
self.train_per_epoch()
if self.num_evaluate_per_epoch != 0:
if epoch % self.num_evaluate_per_epoch == 0 and epoch+1 >= self.num_evaluate_per_epoch:
self.evaluate_per_epoch()
if self.scheduler is not None and self.step_per_epoch:
self.scheduler.step()
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Epoch': lr}
                    self.logging(log_dict)
except KeyboardInterrupt:
self.checkpoint.save(self.model, save_mode='last', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
print("Stop training, checkpoint saved...")
break
print("Training Completed!")
def train_per_epoch(self):
self.model.train()
        running_loss = {}  # accumulate per-key loss values as a dict
running_time = 0
loop = tqdm(self.train_loader)
for i, batch in enumerate(loop):
start_time = time.time()
with torch.cuda.amp.autocast():
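                # Forward pass under autocast (mixed precision); the resulting loss
                # is handed to the model's scaler, which handles loss scaling.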
loss, loss_dict = self.model.training_step(batch)
if self.use_accumulate:
loss /= self.accumulate_steps
self.model.scaler(loss, self.optimizer)
if self.use_accumulate:
if (i+1) % self.accumulate_steps == 0 or i == len(self.train_loader)-1:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
else:
self.model.scaler.step(
self.optimizer, clip_grad=self.clip_grad, parameters=self.model.parameters())
self.optimizer.zero_grad()
if self.scheduler is not None and not self.step_per_epoch:
# self.scheduler.step()
self.scheduler.step(
(self.num_epochs + i) / len(self.train_loader))
lrl = [x['lr'] for x in self.optimizer.param_groups]
lr = sum(lrl) / len(lrl)
log_dict = {'Learning rate/Iterations': lr}
self.logging(log_dict)
torch.cuda.synchronize()
end_time = time.time()
for (key, value) in loss_dict.items():
if key in running_loss.keys():
running_loss[key] += value
else:
running_loss[key] = value
running_time += end_time-start_time
self.iters = self.start_iter + \
len(self.train_loader)*self.epoch + i + 1
if self.iters % self.print_per_iter == 0:
for key in running_loss.keys():
running_loss[key] /= self.print_per_iter
running_loss[key] = np.round(running_loss[key], 5)
loss_string = '{}'.format(running_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print("[{}|{}] [{}|{}] || {} || Time: {:10.4f}s".format(
self.epoch, self.num_epochs, self.iters, self.num_iters, loss_string, running_time))
                # running_loss has already been averaged over print_per_iter above
                self.logging({"Training Loss/Batch": running_loss['T']})
running_loss = {}
running_time = 0
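            # Periodically checkpoint to last.pth so training can be resumed from
            # the most recent iteration.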
if (self.iters % self.checkpoint.save_per_iter == 0 or self.iters == self.num_iters - 1):
print(f'Save model at [{self.epoch}|{self.iters}] to last.pth')
self.checkpoint.save(
self.model,
save_mode='last',
epoch=self.epoch,
iters=self.iters,
best_value=self.best_value)
def evaluate_per_epoch(self):
self.model.eval()
epoch_loss = {}
metric_dict = {}
print('=============================EVALUATION===================================')
start_time = time.time()
with torch.no_grad():
for batch in tqdm(self.val_loader):
_, loss_dict = self.model.evaluate_step(batch)
for (key, val) in loss_dict.items():
if key in epoch_loss.keys():
epoch_loss[key] += val
else:
epoch_loss[key] = val
end_time = time.time()
running_time = end_time - start_time
metric_dict = self.model.get_metric_values()
self.model.reset_metrics()
for key in epoch_loss.keys():
epoch_loss[key] /= len(self.val_loader)
epoch_loss[key] = np.round(epoch_loss[key], 5)
loss_string = '{}'.format(epoch_loss)[
1:-1].replace("'", '').replace(",", ' ||')
print()
print("[{}|{}] || {} || Time: {:10.4f} s".format(
self.epoch, self.num_epochs, loss_string, running_time))
for metric, score in metric_dict.items():
print(metric + ': ' + str(score), end=' | ')
print()
print('==========================================================================')
        # epoch_loss has already been averaged over the validation loader above
        log_dict = {"Validation Loss/Epoch": epoch_loss['T']}
log_dict.update(metric_dict)
self.logging(log_dict)
        # Save the model that achieves the best accuracy score so far
if metric_dict['acc'] > self.best_value:
self.best_value = metric_dict['acc']
self.checkpoint.save(self.model, save_mode='best', epoch=self.epoch,
iters=self.iters, best_value=self.best_value)
if self.visualize_when_val:
self.visualize_batch()
def visualize_batch(self):
        # Visualize Grad-CAM (Gradient-weighted Class Activation Mapping)
if not os.path.exists('./samples'):
os.mkdir('./samples')
denom = Denormalize()
batch = next(iter(self.val_loader))
images = batch["imgs"]
#targets = batch["targets"]
self.model.eval()
config_name = self.cfg.model_name.split('_')[0]
grad_cam = GradCam(model=self.model.model, config_name=config_name)
for idx, inputs in enumerate(images):
image_outname = os.path.join(
'samples', f'{self.epoch}_{self.iters}_{idx}.jpg')
img_show = denom(inputs)
inputs = inputs.unsqueeze(0)
inputs = inputs.to(self.model.device)
target_category = None
grayscale_cam, label_idx = grad_cam(inputs, target_category)
label = self.cfg.obj_list[label_idx]
img_cam = show_cam_on_image(img_show, grayscale_cam, label)
cv2.imwrite(image_outname, img_cam)
def __str__(self) -> str:
title = '------------- Model Summary ---------------\n'
name = f'Name: {self.model.name}\n'
params = f'Number of params: {self.model.trainable_parameters}\n'
train_iter_per_epoch = f'Number of train iterations per epoch: {len(self.train_loader)}\n'
val_iter_per_epoch = f'Number of val iterations per epoch: {len(self.val_loader)}'
return title + name + params + train_iter_per_epoch + val_iter_per_epoch
def print_forward_step(self):
self.model.eval()
outputs = self.model.forward_step()
print('Feedforward: output_shape: ', outputs.shape)
def set_accumulate_step(self):
self.use_accumulate = False
if self.config.total_accumulate_steps > 0:
self.use_accumulate = True
self.accumulate_steps = max(
round(self.config.total_accumulate_steps / self.config.batch_size), 1)
def set_amp(self):
self.use_amp = False
if self.config.mixed_precision:
self
|
__init__
|
identifier_name
|
main.py
|
# Boxes whose overlap exceeds the threshold are discarded; those below the threshold may belong to another object and are kept for further comparison
        inds = np.where(ovr <= thresh)[0]  # indices into order[1:] that satisfy the condition
        order = order[inds + 1]  # +1 to recover the corresponding indices into order
return keep
class FaceDetector:
def __init__(self, model_path):
self.strides = [8.0, 16.0, 32.0, 64.0]
self.min_boxes = [
[10.0, 16.0, 24.0],
[32.0, 48.0],
[64.0, 96.0],
[128.0, 192.0, 256.0]]
self.in_h, self.in_w = (240, 320)
self.face_detector = cv2.dnn.readNetFromONNX(model_path)
# generate_prior_anchor
w_h_list = [self.in_w, self.in_h]
featuremap_size = []
for size in w_h_list:
fm_item = []
for stride in self.strides:
fm_item.append(np.ceil(size / stride))
featuremap_size.append(fm_item)
shrinkage_size = []
for size in w_h_list:
shrinkage_size.append(self.strides)
self.priors = []
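        # Build SSD-style prior boxes: for every feature-map cell at each of the
        # four strides, one prior per min_box size, stored as normalized
        # (cx, cy, w, h) values clipped to [0, 1].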
for index in range(4):
scale_w = self.in_w / shrinkage_size[0][index]
scale_h = self.in_h / shrinkage_size[1][index]
for j in range(int(featuremap_size[1][index])):
for i in range(int(featuremap_size[0][index])):
x_center = (i + 0.5) / scale_w
y_center = (j + 0.5) / scale_h
for k in self.min_boxes[index]:
w = k / self.in_w
h = k / self.in_h
self.priors.append([clip(x_center, 1), clip(y_center, 1),
clip(w, 1), clip(h, 1)])
def postprocess(self, image_w, image_h, scores, boxes, score_threshold):
bbox_value = boxes.flatten()
score_value = scores.flatten()
num_anchors = len(self.priors)
# print(bbox_value.shape)
# print(score_value.shape)
rect_boxes = []
confidences = []
for i in range(num_anchors):
score = score_value[2 * i + 1]
if score > score_threshold:
x_center = bbox_value[i * 4] * 0.1 * self.priors[i][2] + self.priors[i][0]
y_center = bbox_value[i * 4 + 1] * 0.1 * self.priors[i][3] + self.priors[i][1]
w = math.exp(bbox_value[i * 4 + 2] * 0.2) * self.priors[i][2]
h = math.exp(bbox_value[i * 4 + 3] * 0.2) * self.priors[i][3]
x1 = int(clip(x_center - w / 2.0, 1) * image_w)
y1 = int(clip(y_center - h / 2.0, 1) * image_h)
x2 = int(clip(x_center + w / 2.0, 1) * image_w)
y2 = int(clip(y_center + h / 2.0, 1) * image_h)
score = clip(score, 1)
rect_boxes.append([x1, y1, x2 - x1, y2 - y1])
confidences.append(float(score))
indices = cv2.dnn.NMSBoxes(rect_boxes, confidences, score_threshold, 0.5)
if len(indices):
indices = indices.flatten()
rect_boxes = np.array(rect_boxes)[indices]
confidences = np.array(confidences)[indices]
# keep = self.nms(rect_boxes.astype(np.int32), confidences, 0.5)
# print(rect_boxes[indices])
# print(confidences[indices])
return rect_boxes, confidences
def __call__(self, img, **kwargs):
inputBlob = cv2.dnn.blobFromImage(img, 1.0 / 128, (320, 240), (127, 127, 127), swapRB=True)
self.face_detector.setInput(inputBlob)
scores, boxes = self.face_detector.forward(["scores", "boxes"])
# print(scores)
image_h, image_w = img.shape[:2]
rect_boxes, confidences = self.postprocess(image_w, image_h, scores, boxes, 0.6)
return rect_boxes, confidences
class SmokeDetector:
def __init__(self, model_path, confThreshold=0.5, nmsThreshold=0.5, objThreshold=0.5):
self.classes = ['smoke']
self.colors = [np.random.randint(0, 255, size=3).tolist() for _ in range(len(self.classes))]
# num_classes = len(self.classes)
num_classes = 1
anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.no = num_classes + 5 # number of outputs per anchor
self.grid = [np.zeros(1)] * self.nl # init grid
self.stride = np.array([8., 16., 32.])
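        # YOLOv5-style detection head: three output layers (strides 8/16/32), three
        # anchors per layer, and (x, y, w, h, objectness, class scores) per anchor.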
self.anchor_grid = np.asarray(anchors, dtype=np.float32).reshape(self.nl, 1, -1, 1, 1, 2)
self.net = cv2.dnn.readNet(model_path)
self.confThreshold = confThreshold
self.nmsThreshold = nmsThreshold
self.objThreshold = objThreshold
def _make_grid(self, nx=20, ny=20):
xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
return np.stack((xv, yv), 2).reshape((1, 1, ny, nx, 2)).astype(np.float32)
def postprocess(self, image_w, image_h, outs):
r
|
iow = image_h / 640, image_w / 640
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > self.confThreshold and detection[4] > self.objThreshold:
center_x = int(detection[0] * ratiow)
center_y = int(detection[1] * ratioh)
width = int(detection[2] * ratiow)
height = int(detection[3] * ratioh)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)
# print(indices)
if len(indices):
indices = indices.flatten()
boxes = np.array(boxes)[indices]
confidences = np.array(confidences)[indices]
return boxes, confidences
def __call__(self, srcimg):
blob = cv2.dnn.blobFromImage(srcimg, 1 / 255.0, (640, 640), [0, 0, 0], swapRB=True, crop=False)
# Sets the input to the network
self.net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = self.net.forward(self.net.getUnconnectedOutLayersNames())
z = [] # inference output
for i in range(self.nl):
bs, _, ny, nx = outs[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
# outs[i] = outs[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
outs[i] = outs[i].reshape(bs, self.na, self.no, ny, nx
|
atioh, rat
|
identifier_name
|
main.py
|
# Boxes whose overlap exceeds the threshold are discarded; those below the threshold may belong to another object and are kept for further comparison
        inds = np.where(ovr <= thresh)[0]  # indices into order[1:] that satisfy the condition
        order = order[inds + 1]  # +1 to recover the corresponding indices into order
return keep
class FaceDetector:
def __init__(self, model_path):
self.strides = [8.0, 16.0, 32.0, 64.0]
self.min_boxes = [
[10.0, 16.0, 24.0],
[32.0, 48.0],
[64.0, 96.0],
[128.0, 192.0, 256.0]]
self.in_h, self.in_w = (240, 320)
self.face_detector = cv2.dnn.readNetFromONNX(model_path)
# generate_prior_anchor
w_h_list = [self.in_w, self.in_h]
featuremap_size = []
for size in w_h_list:
fm_item = []
for stride in self.strides:
fm_item.append(np.ceil(size / stride))
featuremap_size.append(fm_item)
shrinkage_size = []
for size in w_h_list:
shrinkage_size.append(self.strides)
self.priors = []
for index in range(4):
scale_w = self.in_w / shrinkage_size[0][index]
scale_h = self.in_h / shrinkage_size[1][index]
for j in range(int(featuremap_size[1][index])):
for i in range(int(featuremap_size[0][index])):
x_center = (i + 0.5) / scale_w
y_center = (j + 0.5) / scale_h
for k in self.min_boxes[index]:
w = k / self.in_w
h = k / self.in_h
self.priors.append([clip(x_center, 1), clip(y_center, 1),
clip(w, 1), clip(h, 1)])
def postprocess(self, image_w, image_h, scores, boxes, score_threshold):
bbox_value = boxes.flatten()
score_value = scores.flatten()
num_anchors = len(self.priors)
# print(bbox_value.shape)
# print(score_value.shape)
rect_boxes = []
confidences = []
for i in range(num_anchors):
score = score_value[2 * i + 1]
if score > score_threshold:
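                # Decode the prior-relative offsets (centre variance 0.1, size
                # variance 0.2) back into normalized box coordinates.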
x_center = bbox_value[i * 4] * 0.1 * self.priors[i][2] + self.priors[i][0]
y_center = bbox_value[i * 4 + 1] * 0.1 * self.priors[i][3] + self.priors[i][1]
w = math.exp(bbox_value[i * 4 + 2] * 0.2) * self.priors[i][2]
h = math.exp(bbox_value[i * 4 + 3] * 0.2) * self.priors[i][3]
x1 = int(clip(x_center - w / 2.0, 1) * image_w)
y1 = int(clip(y_center - h / 2.0, 1) * image_h)
x2 = int(clip(x_center + w / 2.0, 1) * image_w)
y2 = int(clip(y_center + h / 2.0, 1) * image_h)
score = clip(score, 1)
rect_boxes.append([x1, y1, x2 - x1, y2 - y1])
confidences.append(float(score))
indices = cv2.dnn.NMSBoxes(rect_boxes, confidences, score_threshold, 0.5)
if len(indices):
indices = indices.flatten()
rect_boxes = np.array(rect_boxes)[indices]
confidences = np.array(confidences)[indices]
# keep = self.nms(rect_boxes.astype(np.int32), confidences, 0.5)
# print(rect_boxes[indices])
# print(confidences[indices])
return rect_boxes, confidences
def __call__(self, img, **kwargs):
inputBlob = cv2.dnn.blobFromImage(img, 1.0 / 128, (320, 240), (127, 127, 127), swapRB=True)
self.face_detector.setInput(inputBlob)
scores, boxes = self.face_detector.forward(["scores", "boxes"])
# print(scores)
image_h, image_w = img.shape[:2]
rect_boxes, confidences = self.postprocess(image_w, image_h, scores, boxes, 0.6)
return rect_boxes, confidences
class SmokeDetector:
def __init__(self, model_path, confThreshold=0.5, nmsThreshold=0.5, objThreshold=0.5):
self.classes = ['smoke']
self.colors = [np.random.randint(0, 255, size=3).tolist() for _ in range(len(self.classes))]
# num_classes = len(self.classes)
num_classes = 1
anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.no = num_classes + 5 # number of outputs per anchor
self.grid = [np.zeros(1)] * self.nl # init grid
self.stride = np.array([8., 16., 32.])
self.anchor_grid = np.asarray(anchors, dtype=np.float32).reshape(self.nl, 1, -1, 1, 1, 2)
self.net = cv2.dnn.readNet(model_path)
self.confThreshold = confThreshold
self.nmsThreshold = nmsThreshold
self.objThreshold = objThreshold
def _make_grid(self, nx=20, ny=20):
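        # Returns a (1, 1, ny, nx, 2) grid of (x, y) cell offsets used when
        # decoding box centres from the raw network outputs.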
xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
return np.stack((xv, yv), 2).reshape((1, 1, ny, nx, 2)).astype(np.float32)
def postprocess(self, image_w, image_h, outs):
ratioh, ratiow = image_h / 640, image_w / 640
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > self.confThreshold and detection[4] > self.objThreshold:
center_x = int(detection[0] * ratiow)
center_y = int(detection[1] * ratioh)
width = int(detection[2] * ratiow)
height = int(detection[3] * ratioh)
left = in
|
)
if len(indices):
indices = indices.flatten()
boxes = np.array(boxes)[indices]
confidences = np.array(confidences)[indices]
return boxes, confidences
def __call__(self, srcimg):
blob = cv2.dnn.blobFromImage(srcimg, 1 / 255.0, (640, 640), [0, 0, 0], swapRB=True, crop=False)
# Sets the input to the network
self.net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = self.net.forward(self.net.getUnconnectedOutLayersNames())
z = [] # inference output
for i in range(self.nl):
bs, _, ny, nx = outs[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
# outs[i] = outs[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
outs[i] = outs[i].reshape(bs, self.na, self.no, ny, nx
|
t(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)
# print(indices
|
conditional_block
|
main.py
|
# Boxes whose overlap exceeds the threshold are discarded; those below the threshold may belong to another object and are kept for further comparison
        inds = np.where(ovr <= thresh)[0]  # indices into order[1:] that satisfy the condition
        order = order[inds + 1]  # +1 to recover the corresponding indices into order
return keep
class FaceDetector:
def __init__(self, model_path):
self.strides = [8.0, 16.0, 32.0, 64.0]
self.min_boxes = [
[10.0, 16.0, 24.0],
[32.0, 48.0],
[64.0, 96.0],
[128.0, 192.0, 256.0]]
self.in_h, self.in_w = (240, 320)
self.face_detector = cv2.dnn.readNetFromONNX(model_path)
# generate_prior_anchor
w_h_list = [self.in_w, self.in_h]
featuremap_size = []
for size in w_h_list:
fm_item = []
for stride in self.strides:
fm_item.append(np.ceil(size / stride))
featuremap_size.append(fm_item)
shrinkage_size = []
for size in w_h_list:
shrinkage_size.append(self.strides)
self.priors = []
for index in range(4):
scale_w = self.in_w / shrinkage_size[0][index]
scale_h = self.in_h / shrinkage_size[1][index]
for j in range(int(featuremap_size[1][index])):
for i in range(int(featuremap_size[0][index])):
x_center = (i + 0.5) / scale_w
y_center = (j + 0.5) / scale_h
for k in self.min_boxes[index]:
w = k / self.in_w
h = k / self.in_h
self.priors.append([clip(x_center, 1), clip(y_center, 1),
clip(w, 1), clip(h, 1)])
def postprocess(self, image_w, image_h, scores, boxes, score_threshold):
bbox_value = boxes.flatten()
score_value = scores.flatten()
num_anchors = len(self.priors)
# print(bbox_value.shape)
# print(score_value.shape)
rect_boxes = []
confidences = []
for i in range(num_anchors):
score = score_value[2 * i + 1]
if score > score_threshold:
x_center = bbox_value[i * 4] * 0.1 * self.priors[i][2] + self.priors[i][0]
y_center = bbox_value[i * 4 + 1] * 0.1 * self.priors[i][3] + self.priors[i][1]
w = math.exp(bbox_value[i * 4 + 2] * 0.2) * self.priors[i][2]
h = math.exp(bbox_value[i * 4 + 3] * 0.2) * self.priors[i][3]
x1 = int(clip(x_center - w / 2.0, 1) * image_w)
y1 = int(clip(y_center - h / 2.0, 1) * image_h)
x2 = int(clip(x_center + w / 2.0, 1) * image_w)
y2 = int(clip(y_center + h / 2.0, 1) * image_h)
score = clip(score, 1)
rect_boxes.append([x1, y1, x2 - x1, y2 - y1])
confidences.append(float(score))
indices = cv2.dnn.NMSBoxes(rect_boxes, confidences, score_threshold, 0.5)
if len(indices):
indices = indices.flatten()
rect_boxes = np.array(rect_boxes)[indices]
confidences = np.array(confidences)[indices]
# keep = self.nms(rect_boxes.astype(np.int32), confidences, 0.5)
# print(rect_boxes[indices])
# print(confidences[indices])
return rect_boxes, confidences
def __call__(self, img, **kwargs):
inputBlob = cv2.dnn.blobFromImage(img, 1.0 / 128, (320, 240), (127, 127, 127), swapRB=True)
self.face_detector.setInput(inputBlob)
scores, boxes = self.face_detector.forward(["scores", "boxes"])
# print(scores)
image_h, image_w = img.shape[:2]
rect_boxes, confidences = self.postprocess(image_w, image_h, scores, boxes, 0.6)
return rect_boxes, confidences
class SmokeDetector:
def __init__(self, model_path, confThreshold=0.5, nmsThreshold=0.5, objThreshold=0.5):
self.classes = ['smoke']
self.colors = [np.random.randint(0, 255, size=3).tolist() for _ in range(len(self.classes))]
# num_classes = len(self.classes)
num_classes = 1
anchors = [[10, 13, 16, 30, 33, 23],
|
ratioh, ratiow = image_h / 640, image_w / 640
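        # The network runs on a fixed 640x640 input, so these ratios map the
        # detections back to the original image size.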
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > self.confThreshold and detection[4] > self.objThreshold:
center_x = int(detection[0] * ratiow)
center_y = int(detection[1] * ratioh)
width = int(detection[2] * ratiow)
height = int(detection[3] * ratioh)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)
# print(indices)
if len(indices):
indices = indices.flatten()
boxes = np.array(boxes)[indices]
confidences = np.array(confidences)[indices]
return boxes, confidences
def __call__(self, srcimg):
blob = cv2.dnn.blobFromImage(srcimg, 1 / 255.0, (640, 640), [0, 0, 0], swapRB=True, crop=False)
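        # Scale pixels to [0, 1], resize to 640x640 and swap BGR -> RGB before
        # feeding the cv2.dnn network.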
# Sets the input to the network
self.net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = self.net.forward(self.net.getUnconnectedOutLayersNames())
z = [] # inference output
for i in range(self.nl):
bs, _, ny, nx = outs[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
# outs[i] = outs[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
outs[i] = outs[i].reshape(bs, self.na, self.no, ny, nx
|
[30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.no = num_classes + 5 # number of outputs per anchor
self.grid = [np.zeros(1)] * self.nl # init grid
self.stride = np.array([8., 16., 32.])
self.anchor_grid = np.asarray(anchors, dtype=np.float32).reshape(self.nl, 1, -1, 1, 1, 2)
self.net = cv2.dnn.readNet(model_path)
self.confThreshold = confThreshold
self.nmsThreshold = nmsThreshold
self.objThreshold = objThreshold
def _make_grid(self, nx=20, ny=20):
xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
return np.stack((xv, yv), 2).reshape((1, 1, ny, nx, 2)).astype(np.float32)
def postprocess(self, image_w, image_h, outs):
|
identifier_body
|
main.py
|
# Boxes whose overlap exceeds the threshold are discarded; those below the threshold may belong to another object and are kept for further comparison
        inds = np.where(ovr <= thresh)[0]  # indices into order[1:] that satisfy the condition
        order = order[inds + 1]  # +1 to recover the corresponding indices into order
return keep
class FaceDetector:
def __init__(self, model_path):
self.strides = [8.0, 16.0, 32.0, 64.0]
self.min_boxes = [
[10.0, 16.0, 24.0],
[32.0, 48.0],
[64.0, 96.0],
[128.0, 192.0, 256.0]]
self.in_h, self.in_w = (240, 320)
self.face_detector = cv2.dnn.readNetFromONNX(model_path)
# generate_prior_anchor
w_h_list = [self.in_w, self.in_h]
featuremap_size = []
for size in w_h_list:
fm_item = []
for stride in self.strides:
fm_item.append(np.ceil(size / stride))
featuremap_size.append(fm_item)
shrinkage_size = []
for size in w_h_list:
shrinkage_size.append(self.strides)
self.priors = []
for index in range(4):
scale_w = self.in_w / shrinkage_size[0][index]
scale_h = self.in_h / shrinkage_size[1][index]
for j in range(int(featuremap_size[1][index])):
for i in range(int(featuremap_size[0][index])):
x_center = (i + 0.5) / scale_w
y_center = (j + 0.5) / scale_h
for k in self.min_boxes[index]:
w = k / self.in_w
h = k / self.in_h
self.priors.append([clip(x_center, 1), clip(y_center, 1),
clip(w, 1), clip(h, 1)])
def postprocess(self, image_w, image_h, scores, boxes, score_threshold):
bbox_value = boxes.flatten()
score_value = scores.flatten()
num_anchors = len(self.priors)
# print(bbox_value.shape)
# print(score_value.shape)
rect_boxes = []
confidences = []
for i in range(num_anchors):
score = score_value[2 * i + 1]
if score > score_threshold:
x_center = bbox_value[i * 4] * 0.1 * self.priors[i][2] + self.priors[i][0]
y_center = bbox_value[i * 4 + 1] * 0.1 * self.priors[i][3] + self.priors[i][1]
w = math.exp(bbox_value[i * 4 + 2] * 0.2) * self.priors[i][2]
h = math.exp(bbox_value[i * 4 + 3] * 0.2) * self.priors[i][3]
x1 = int(clip(x_center - w / 2.0, 1) * image_w)
y1 = int(clip(y_center - h / 2.0, 1) * image_h)
x2 = int(clip(x_center + w / 2.0, 1) * image_w)
y2 = int(clip(y_center + h / 2.0, 1) * image_h)
score = clip(score, 1)
rect_boxes.append([x1, y1, x2 - x1, y2 - y1])
confidences.append(float(score))
indices = cv2.dnn.NMSBoxes(rect_boxes, confidences, score_threshold, 0.5)
if len(indices):
indices = indices.flatten()
rect_boxes = np.array(rect_boxes)[indices]
confidences = np.array(confidences)[indices]
# keep = self.nms(rect_boxes.astype(np.int32), confidences, 0.5)
# print(rect_boxes[indices])
# print(confidences[indices])
return rect_boxes, confidences
def __call__(self, img, **kwargs):
inputBlob = cv2.dnn.blobFromImage(img, 1.0 / 128, (320, 240), (127, 127, 127), swapRB=True)
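        # Face-detector preprocessing: resize to 320x240, subtract the mean (127)
        # and scale by 1/128 to match the ONNX model's expected input.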
self.face_detector.setInput(inputBlob)
|
return rect_boxes, confidences
class SmokeDetector:
def __init__(self, model_path, confThreshold=0.5, nmsThreshold=0.5, objThreshold=0.5):
self.classes = ['smoke']
self.colors = [np.random.randint(0, 255, size=3).tolist() for _ in range(len(self.classes))]
# num_classes = len(self.classes)
num_classes = 1
anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
self.nl = len(anchors) # number of detection layers
self.na = len(anchors[0]) // 2 # number of anchors
self.no = num_classes + 5 # number of outputs per anchor
self.grid = [np.zeros(1)] * self.nl # init grid
self.stride = np.array([8., 16., 32.])
self.anchor_grid = np.asarray(anchors, dtype=np.float32).reshape(self.nl, 1, -1, 1, 1, 2)
self.net = cv2.dnn.readNet(model_path)
self.confThreshold = confThreshold
self.nmsThreshold = nmsThreshold
self.objThreshold = objThreshold
def _make_grid(self, nx=20, ny=20):
xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
return np.stack((xv, yv), 2).reshape((1, 1, ny, nx, 2)).astype(np.float32)
def postprocess(self, image_w, image_h, outs):
ratioh, ratiow = image_h / 640, image_w / 640
# Scan through all the bounding boxes output from the network and keep only the
# ones with high confidence scores. Assign the box's class label as the class with the highest score.
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out:
scores = detection[5:]
classId = np.argmax(scores)
confidence = scores[classId]
if confidence > self.confThreshold and detection[4] > self.objThreshold:
center_x = int(detection[0] * ratiow)
center_y = int(detection[1] * ratioh)
width = int(detection[2] * ratiow)
height = int(detection[3] * ratioh)
left = int(center_x - width / 2)
top = int(center_y - height / 2)
classIds.append(classId)
confidences.append(float(confidence))
boxes.append([left, top, width, height])
# Perform non maximum suppression to eliminate redundant overlapping boxes with
# lower confidences.
indices = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)
# print(indices)
if len(indices):
indices = indices.flatten()
boxes = np.array(boxes)[indices]
confidences = np.array(confidences)[indices]
return boxes, confidences
def __call__(self, srcimg):
blob = cv2.dnn.blobFromImage(srcimg, 1 / 255.0, (640, 640), [0, 0, 0], swapRB=True, crop=False)
# Sets the input to the network
self.net.setInput(blob)
# Runs the forward pass to get output of the output layers
outs = self.net.forward(self.net.getUnconnectedOutLayersNames())
z = [] # inference output
for i in range(self.nl):
bs, _, ny, nx = outs[i].shape # x(bs,255,20,20) to x(bs,3,20,20,85)
# outs[i] = outs[i].view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
outs[i] = outs[i].reshape(bs, self.na, self.no, ny, nx
|
scores, boxes = self.face_detector.forward(["scores", "boxes"])
# print(scores)
image_h, image_w = img.shape[:2]
rect_boxes, confidences = self.postprocess(image_w, image_h, scores, boxes, 0.6)
|
random_line_split
|
director.ts
|
{
after: options.after || null,
before: options.before || null,
on: options.on || null,
};
return this;
};
Router.prototype.param = function (token, matcher) {
if (token[0] !== ':') {
token = `:${token}`;
}
const compiled = new RegExp(token, 'g');
this.params[token] = function (str) {
return str.replace(compiled, matcher.source || matcher);
};
return this;
};
Router.prototype.on = Router.prototype.route = function (method, path, route) {
const self = this;
if (!route && typeof path === 'function') {
route = path;
path = method;
method = 'on';
}
if (Array.isArray(path)) {
return path.forEach(function (p) {
self.on(method, p, route);
});
}
if (path.source) {
path = path.source.replace(/\\\//gi, '/');
}
if (Array.isArray(method)) {
return method.forEach(function (m) {
self.on(m.toLowerCase(), path, route);
});
}
path = path.split(new RegExp(this.delimiter));
path = terminator(path, this.delimiter);
this.insert(method, this.scope.concat(path), route);
};
Router.prototype.path = function (path, routesFn) {
const self = this;
const { length } = this.scope;
if (path.source) {
path = path.source.replace(/\\\//gi, '/');
}
path = path.split(new RegExp(this.delimiter));
path = terminator(path, this.delimiter);
this.scope = this.scope.concat(path);
routesFn.call(this, this);
this.scope.splice(length, path.length);
};
Router.prototype.dispatch = function (method, path, callback) {
const self = this;
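  // Collect the handler chain matching this method/path (query string stripped),
  // run any "after" hooks left over from the previous route, then invoke the new
  // route's before/on handlers.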
let fns = this.traverse(
method,
path.replace(QUERY_SEPARATOR, ''),
this.routes,
''
);
const invoked = this._invoked;
let after;
this._invoked = true;
if (!fns || fns.length === 0) {
this.last = [];
if (typeof this.notfound === 'function') {
this.invoke(
[this.notfound],
{
method,
path,
},
callback
);
}
return false;
}
if (this.recurse === 'forward') {
fns = fns.reverse();
}
function updateAndInvoke() {
self.last = fns.after;
self.invoke(self.runlist(fns), self, callback);
}
after =
this.every && this.every.after
? [this.every.after].concat(this.last)
: [this.last];
if (after && after.length > 0 && invoked) {
if (this.async) {
this.invoke(after, this, updateAndInvoke);
} else {
this.invoke(after, this);
updateAndInvoke();
}
return true;
}
updateAndInvoke();
return true;
};
Router.prototype.invoke = function (fns, thisArg, callback) {
const self = this;
let apply;
if (this.async) {
apply = function (fn, next) {
if (Array.isArray(fn)) {
return _asyncEverySeries(fn, apply, next);
} else if (typeof fn === 'function') {
fn.apply(thisArg, (fns.captures || []).concat(next));
}
};
_asyncEverySeries(fns, apply, function () {
if (callback) {
callback.apply(thisArg, arguments);
}
});
} else {
apply = function (fn) {
if (Array.isArray(fn)) {
return _every(fn, apply);
} else if (typeof fn === 'function') {
return fn.apply(thisArg, fns.captures || []);
} else if (typeof fn === 'string' && self.resource) {
self.resource[fn].apply(thisArg, fns.captures || []);
}
};
_every(fns, apply);
}
};
Router.prototype.traverse = function (method, path, routes, regexp, filter) {
let fns = [];
let current;
let exact;
let match;
let next;
let that;
function filterRoutes(routes) {
if (!filter) {
return routes;
}
function deepCopy(source) {
const result = [];
for (let i = 0; i < source.length; i++) {
result[i] = Array.isArray(source[i]) ? deepCopy(source[i]) : source[i];
}
return result;
}
function applyFilter(fns) {
for (let i = fns.length - 1; i >= 0; i--) {
if (Array.isArray(fns[i])) {
applyFilter(fns[i]);
if (fns[i].length === 0) {
fns.splice(i, 1);
}
} else if (!filter(fns[i])) {
fns.splice(i, 1);
}
}
}
const newRoutes = deepCopy(routes);
newRoutes.matched = routes.matched;
newRoutes.captures = routes.captures;
newRoutes.after = routes.after.filter(filter);
applyFilter(newRoutes);
return newRoutes;
}
if (path === this.delimiter && routes[method]) {
next = [[routes.before, routes[method]].filter(Boolean)];
next.after = [routes.after].filter(Boolean);
next.matched = true;
next.captures = [];
return filterRoutes(next);
}
for (const r in routes) {
if (
routes.hasOwnProperty(r) &&
(!this._methods[r] ||
(this._methods[r] &&
typeof routes[r] === 'object' &&
!Array.isArray(routes[r])))
) {
current = exact = regexp + this.delimiter + r;
if (!this.strict) {
exact += `[${this.delimiter}]?`;
}
match = path.match(new RegExp(`^${exact}`));
if (!match) {
continue;
}
if (match[0] && match[0] == path && routes[r][method]) {
next = [[routes[r].before, routes[r][method]].filter(Boolean)];
next.after = [routes[r].after].filter(Boolean);
next.matched = true;
next.captures = match.slice(1);
if (this.recurse && routes === this.routes) {
next.push([routes.before, routes.on].filter(Boolean));
next.after = next.after.concat([routes.after].filter(Boolean));
}
return filterRoutes(next);
}
next = this.traverse(method, path, routes[r], current);
if (next.matched) {
if (next.length > 0) {
fns = fns.concat(next);
}
if (this.recurse) {
fns.push([routes[r].before, routes[r].on].filter(Boolean));
next.after = next.after.concat([routes[r].after].filter(Boolean));
if (routes === this.routes) {
fns.push([routes.before, routes.on].filter(Boolean));
next.after = next.after.concat([routes.after].filter(Boolean));
}
}
fns.matched = true;
fns.captures = next.captures;
fns.after = next.after;
return filterRoutes(fns);
}
}
}
return false;
};
Router.prototype.insert = function (method, path, route, parent) {
let methodType;
let parentType;
let isArray;
let nested;
let part;
path = path.filter(function (p) {
return p && p.length > 0;
});
parent = parent || this.routes;
part = path.shift();
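  // Routes are stored as a nested object keyed by path segment; recurse one
  // segment at a time until only the handler remains to attach.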
if (/\:|\*/.test(part) && !/\\d|\\w/.test(part)) {
part = regifyString(part, this.params);
}
if (path.length > 0) {
parent[part] = parent[part] || {};
return this.insert(method, path, route, parent[part]);
}
if (!part && !path.length && parent === this.routes) {
methodType = typeof parent[method];
switch (methodType) {
case 'function':
parent[method] = [parent[method], route];
return;
case 'object':
parent[method].push(route);
return;
case 'undefined':
parent[method] = route;
return;
}
return;
}
parentType = typeof parent[part];
isArray = Array.isArray(parent[part]);
if (parent[part] && !isArray && parentType == 'object') {
methodType = typeof parent[part][method];
switch (methodType) {
case 'function':
parent[part][method] = [parent[part][method], route];
return;
case 'object':
parent[part][method].push(route);
return;
case 'undefined':
parent[part][method] = route;
return;
}
} else if (parentType == 'undefined') {
nested = {};
nested[method] = route;
parent[part] = nested;
return;
}
throw new Error(`Invalid route context: ${parentType}`);
};
Router.prototype.extend = function (methods) {
const self = this;
const len = methods.length;
let i;
function extend(method) {
self.
|
_metho
|
identifier_name
|
|
director.ts
|
e() {
if (this.mode === 'modern') {
this.history === true ? window.onpopstate() : window.onhashchange();
} else {
this.onHashChanged();
}
},
init(fn, history) {
const self = this;
this.history = history;
if (!Router.listeners) {
Router.listeners = [];
}
function onchange(onChangeEvent) {
for (let i = 0, l = Router.listeners.length; i < l; i++) {
Router.listeners[i](onChangeEvent);
}
}
// note IE8 is being counted as 'modern' because it has the hashchange event
if (
'onhashchange' in window &&
(document.documentMode === undefined || document.documentMode > 7)
) {
// At least for now HTML5 history is available for 'modern' browsers only
if (this.history === true) {
// There is an old bug in Chrome that causes onpopstate to fire even
// upon initial page load. Since the handler is run manually in init(),
        // this would cause Chrome to run it twice. Currently the only
// workaround seems to be to set the handler after the initial page load
// http://code.google.com/p/chromium/issues/detail?id=63040
setTimeout(function () {
window.onpopstate = onchange;
}, 500);
} else {
window.onhashchange = onchange;
}
this.mode = 'modern';
} else {
//
// IE support, based on a concept by Erik Arvidson ...
//
const frame = document.createElement('iframe');
frame.id = 'state-frame';
frame.style.display = 'none';
document.body.appendChild(frame);
this.writeFrame('');
if ('onpropertychange' in document && 'attachEvent' in document) {
document.attachEvent('onpropertychange', function () {
if (event.propertyName === 'location') {
self.check();
}
});
}
window.setInterval(function () {
self.check();
}, 50);
this.onHashChanged = onchange;
this.mode = 'legacy';
}
Router.listeners.push(fn);
return this.mode;
},
destroy(fn) {
if (!Router || !Router.listeners) {
return;
}
const { listeners } = Router;
for (let i = listeners.length - 1; i >= 0; i--) {
if (listeners[i] === fn) {
listeners.splice(i, 1);
}
}
},
setHash(s) {
// Mozilla always adds an entry to the history
if (this.mode === 'legacy') {
this.writeFrame(s);
}
if (this.history === true) {
window.history.pushState({}, document.title, s);
// Fire an onpopstate event manually since pushing does not obviously
// trigger the pop event.
this.fire();
} else {
dloc.hash = s[0] === '/' ? s : `/${s}`;
}
return this;
},
writeFrame(s) {
// IE support...
const f = document.getElementById('state-frame');
const d = f.contentDocument || f.contentWindow.document;
d.open();
d.write(
      `<script>_hash = '${s}'; onload = parent.listener.syncHash;</script>`
);
d.close();
},
syncHash() {
// IE support...
const s = this._hash;
if (s != dloc.hash) {
dloc.hash = s;
}
return this;
},
onHashChanged() {},
};
router = function (routes) {
  // Calling it as a plain function also returns an instance: var a = a(); works like var a = new a();
if (!(this instanceof Router)) return new Router(routes);
this.params = {};
this.routes = {};
this.methods = ['on', 'once', 'after', 'before'];
this.scope = [];
this._methods = {};
this._insert = this.insert;
this.insert = this.insertEx;
this.historySupport =
(window.history != null ? window.history.pushState : null) != null;
this.configure();
this.mount(routes || {});
};
const Router = router;
Router.prototype.init = function (r) {
const self = this;
let routeTo;
this.handler = function (onChangeEvent) {
const newURL =
(onChangeEvent && onChangeEvent.newURL) || window.location.hash;
const url =
self.history === true ? self.getPath() : newURL.replace(/.*#/, '');
self.dispatch('on', url.charAt(0) === '/' ? url : `/${url}`);
};
listener.init(this.handler, this.history);
if (this.history === false) {
if (dlocHashEmpty() && r) {
dloc.hash = r;
} else if (!dlocHashEmpty()) {
self.dispatch('on', `/${dloc.hash.replace(/^(#\/|#|\/)/, '')}`);
}
} else {
if (this.convert_hash_in_init) {
// Use hash as route
routeTo =
dlocHashEmpty() && r
? r
: !dlocHashEmpty()
? dloc.hash.replace(/^#/, '')
: null;
if (routeTo) {
window.history.replaceState({}, document.title, routeTo);
}
} else {
// Use canonical url
routeTo = this.getPath();
}
// Router has been initialized, but due to the chrome bug it will not
// yet actually route HTML5 history state changes. Thus, decide if should route.
if (routeTo || this.run_in_init === true) {
this.handler();
}
}
return this;
};
Router.prototype.explode = function () {
let v = this.history === true ? this.getPath() : dloc.hash;
if (v.charAt(1) === '/') {
v = v.slice(1);
}
return v.slice(1, v.length).split('/');
};
Router.prototype.setRoute = function (i, v, val) {
let url = this.explode();
if (typeof i === 'number' && typeof v === 'string') {
url[i] = v;
} else if (typeof val === 'string') {
    url.splice(i, v, val);
} else {
url = [i];
}
listener.setHash(url.join('/'));
return url;
};
Router.prototype.insertEx = function (method, path, route, parent) {
if (method === 'once') {
method = 'on';
route = (function (route) {
let once = false;
return function () {
if (once) return;
once = true;
return route.apply(this, arguments);
};
})(route);
}
return this._insert(method, path, route, parent);
};
Router.prototype.getRoute = function (v) {
let ret = v;
if (typeof v === 'number') {
ret = this.explode()[v];
} else if (typeof v === 'string') {
const h = this.explode();
ret = h.indexOf(v);
} else {
ret = this.explode();
}
return ret;
};
Router.prototype.destroy = function () {
listener.destroy(this.handler);
return this;
};
Router.prototype.getPath = function () {
let path = window.location.pathname;
if (path.substr(0, 1) !== '/') {
path = `/${path}`;
}
return path;
};
function _every(arr, iterator) {
for (let i = 0; i < arr.length; i += 1) {
if (iterator(arr[i], i, arr) === false) {
return;
}
}
}
function _flatten(arr) {
let flat = [];
for (let i = 0, n = arr.length; i < n; i++) {
flat = flat.concat(arr[i]);
}
return flat;
}
function _asyncEverySeries(arr, iterator, callback) {
if (!arr.length) {
return callback();
}
let completed = 0;
(function iterate() {
iterator(arr[completed], function (err) {
if (err || err === false) {
callback(err);
callback = function () {};
} else {
completed += 1;
if (completed === arr.length) {
callback();
} else {
iterate();
}
}
});
})();
}
function paramifyString(str, params, mod) {
mod = str;
for (const param in params) {
if (params.hasOwnProperty(param)) {
mod = params[param](str);
if (mod !== str) {
break;
}
}
}
return mod === str ? '([._a-zA-Z0-9-%()]+)' : mod;
}
function regifyString(str, params) {
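  // Convert '*' wildcards in a route string into capturing regular-expression
  // groups so they can match arbitrary path segments.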
let matches;
let last = 0;
let out = '';
while ((matches = str.substr(last).match(/[^\w\d\- %@&]*\*[^\w\d\- %@&]*/))) {
last = matches.index + matches[0].length;
matches[0] = matches[0].replace(/^\*/, '([_.()!\\ %@&a-zA-Z0-9-]+)');
out += str.substr(0
|
nst h = dloc.hash;
if (h != this.hash) {
this.hash = h;
this.onHashChanged();
}
},
fir
|
identifier_body
|
|
director.ts
|
//
// IE support, based on a concept by Erik Arvidson ...
//
const frame = document.createElement('iframe');
frame.id = 'state-frame';
frame.style.display = 'none';
document.body.appendChild(frame);
this.writeFrame('');
if ('onpropertychange' in document && 'attachEvent' in document) {
document.attachEvent('onpropertychange', function () {
if (event.propertyName === 'location') {
self.check();
}
});
}
window.setInterval(function () {
self.check();
}, 50);
this.onHashChanged = onchange;
this.mode = 'legacy';
}
Router.listeners.push(fn);
return this.mode;
},
destroy(fn) {
if (!Router || !Router.listeners) {
return;
}
const { listeners } = Router;
for (let i = listeners.length - 1; i >= 0; i--) {
if (listeners[i] === fn) {
listeners.splice(i, 1);
}
}
},
setHash(s) {
// Mozilla always adds an entry to the history
if (this.mode === 'legacy') {
this.writeFrame(s);
}
if (this.history === true) {
window.history.pushState({}, document.title, s);
// Fire an onpopstate event manually since pushing does not obviously
// trigger the pop event.
this.fire();
} else {
dloc.hash = s[0] === '/' ? s : `/${s}`;
}
return this;
},
writeFrame(s) {
// IE support...
const f = document.getElementById('state-frame');
const d = f.contentDocument || f.contentWindow.document;
d.open();
d.write(
`<script>_hash = '${s}'; onload = parent.listener.syncHash;<\/script>`
);
d.close();
},
syncHash() {
// IE support...
const s = this._hash;
if (s != dloc.hash) {
dloc.hash = s;
}
return this;
},
onHashChanged() {},
};
router = function (routes) {
// Calling it as a plain function also returns an instance: var a = a(); var a = new a();
if (!(this instanceof Router)) return new Router(routes);
this.params = {};
this.routes = {};
this.methods = ['on', 'once', 'after', 'before'];
this.scope = [];
this._methods = {};
this._insert = this.insert;
this.insert = this.insertEx;
this.historySupport =
(window.history != null ? window.history.pushState : null) != null;
this.configure();
this.mount(routes || {});
};
const Router = router;
Router.prototype.init = function (r) {
const self = this;
let routeTo;
this.handler = function (onChangeEvent) {
const newURL =
(onChangeEvent && onChangeEvent.newURL) || window.location.hash;
const url =
self.history === true ? self.getPath() : newURL.replace(/.*#/, '');
self.dispatch('on', url.charAt(0) === '/' ? url : `/${url}`);
};
listener.init(this.handler, this.history);
if (this.history === false) {
if (dlocHashEmpty() && r) {
dloc.hash = r;
} else if (!dlocHashEmpty()) {
self.dispatch('on', `/${dloc.hash.replace(/^(#\/|#|\/)/, '')}`);
}
} else {
if (this.convert_hash_in_init) {
// Use hash as route
routeTo =
dlocHashEmpty() && r
? r
: !dlocHashEmpty()
? dloc.hash.replace(/^#/, '')
: null;
if (routeTo) {
window.history.replaceState({}, document.title, routeTo);
}
} else {
// Use canonical url
routeTo = this.getPath();
}
// Router has been initialized, but due to the chrome bug it will not
// yet actually route HTML5 history state changes. Thus, decide whether we should route.
if (routeTo || this.run_in_init === true) {
this.handler();
}
}
return this;
};
Router.prototype.explode = function () {
let v = this.history === true ? this.getPath() : dloc.hash;
if (v.charAt(1) === '/') {
v = v.slice(1);
}
return v.slice(1, v.length).split('/');
};
Router.prototype.setRoute = function (i, v, val) {
let url = this.explode();
if (typeof i === 'number' && typeof v === 'string') {
url[i] = v;
} else if (typeof val === 'string') {
url.splice(i, v, val);
} else {
url = [i];
}
listener.setHash(url.join('/'));
return url;
};
Router.prototype.insertEx = function (method, path, route, parent) {
if (method === 'once') {
method = 'on';
route = (function (route) {
let once = false;
return function () {
if (once) return;
once = true;
return route.apply(this, arguments);
};
})(route);
}
return this._insert(method, path, route, parent);
};
Router.prototype.getRoute = function (v) {
let ret = v;
if (typeof v === 'number') {
ret = this.explode()[v];
} else if (typeof v === 'string') {
const h = this.explode();
ret = h.indexOf(v);
} else {
ret = this.explode();
}
return ret;
};
Router.prototype.destroy = function () {
listener.destroy(this.handler);
return this;
};
Router.prototype.getPath = function () {
let path = window.location.pathname;
if (path.substr(0, 1) !== '/') {
path = `/${path}`;
}
return path;
};
function _every(arr, iterator) {
for (let i = 0; i < arr.length; i += 1) {
if (iterator(arr[i], i, arr) === false) {
return;
}
}
}
function _flatten(arr) {
let flat = [];
for (let i = 0, n = arr.length; i < n; i++) {
flat = flat.concat(arr[i]);
}
return flat;
}
function _asyncEverySeries(arr, iterator, callback) {
if (!arr.length) {
return callback();
}
let completed = 0;
(function iterate() {
iterator(arr[completed], function (err) {
if (err || err === false) {
callback(err);
callback = function () {};
} else {
completed += 1;
if (completed === arr.length) {
callback();
} else {
iterate();
}
}
});
})();
}
function paramifyString(str, params, mod) {
mod = str;
for (const param in params) {
if (params.hasOwnProperty(param)) {
mod = params[param](str);
if (mod !== str) {
break;
}
}
}
return mod === str ? '([._a-zA-Z0-9-%()]+)' : mod;
}
function regifyString(str, params) {
let matches;
let last = 0;
let out = '';
while ((matches = str.substr(last).match(/[^\w\d\- %@&]*\*[^\w\d\- %@&]*/))) {
last = matches.index + matches[0].length;
matches[0] = matches[0].replace(/^\*/, '([_.()!\\ %@&a-zA-Z0-9-]+)');
out += str.substr(0, matches.index) + matches[0];
}
str = out += str.substr(last);
const captures = str.match(/:([^\/]+)/gi);
let capture;
let length;
if (captures) {
length = captures.length;
for (let i = 0; i < length; i++) {
capture = captures[i];
if (capture.slice(0, 2) === '::') {
str = capture.slice(1);
} else {
str = str.replace(capture, paramifyString(capture, params));
}
}
}
return str;
}
function terminator(routes, delimiter, start, stop) {
let last = 0;
let left = 0;
let right = 0;
var start = (start || '(').toString();
var stop = (stop || ')').toString();
let i;
for (i = 0; i < routes.length; i++) {
|
// At least for now HTML5 history is available for 'modern' browsers only
if (this.history === true) {
// There is an old bug in Chrome that causes onpopstate to fire even
// upon initial page load. Since the handler is run manually in init(),
// this would cause Chrome to run it twice. Currently the only
// workaround seems to be to set the handler after the initial page load
// http://code.google.com/p/chromium/issues/detail?id=63040
setTimeout(function () {
window.onpopstate = onchange;
}, 500);
} else {
window.onhashchange = onchange;
}
this.mode = 'modern';
} else {
|
conditional_block
|
|
director.ts
|
'once', 'after', 'before'];
this.scope = [];
this._methods = {};
this._insert = this.insert;
this.insert = this.insertEx;
this.historySupport =
(window.history != null ? window.history.pushState : null) != null;
this.configure();
this.mount(routes || {});
};
const Router = router;
Router.prototype.init = function (r) {
const self = this;
let routeTo;
this.handler = function (onChangeEvent) {
const newURL =
(onChangeEvent && onChangeEvent.newURL) || window.location.hash;
const url =
self.history === true ? self.getPath() : newURL.replace(/.*#/, '');
self.dispatch('on', url.charAt(0) === '/' ? url : `/${url}`);
};
listener.init(this.handler, this.history);
if (this.history === false) {
if (dlocHashEmpty() && r) {
dloc.hash = r;
} else if (!dlocHashEmpty()) {
self.dispatch('on', `/${dloc.hash.replace(/^(#\/|#|\/)/, '')}`);
}
} else {
if (this.convert_hash_in_init) {
// Use hash as route
routeTo =
dlocHashEmpty() && r
? r
: !dlocHashEmpty()
? dloc.hash.replace(/^#/, '')
: null;
if (routeTo) {
window.history.replaceState({}, document.title, routeTo);
}
} else {
// Use canonical url
routeTo = this.getPath();
}
// Router has been initialized, but due to the chrome bug it will not
// yet actually route HTML5 history state changes. Thus, decide whether we should route.
if (routeTo || this.run_in_init === true) {
this.handler();
}
}
return this;
};
Router.prototype.explode = function () {
let v = this.history === true ? this.getPath() : dloc.hash;
if (v.charAt(1) === '/') {
v = v.slice(1);
}
return v.slice(1, v.length).split('/');
};
Router.prototype.setRoute = function (i, v, val) {
let url = this.explode();
if (typeof i === 'number' && typeof v === 'string') {
url[i] = v;
} else if (typeof val === 'string') {
url.splice(i, v, val);
} else {
url = [i];
}
listener.setHash(url.join('/'));
return url;
};
Router.prototype.insertEx = function (method, path, route, parent) {
if (method === 'once') {
method = 'on';
route = (function (route) {
let once = false;
return function () {
if (once) return;
once = true;
return route.apply(this, arguments);
};
})(route);
}
return this._insert(method, path, route, parent);
};
Router.prototype.getRoute = function (v) {
let ret = v;
if (typeof v === 'number') {
ret = this.explode()[v];
} else if (typeof v === 'string') {
const h = this.explode();
ret = h.indexOf(v);
} else {
ret = this.explode();
}
return ret;
};
Router.prototype.destroy = function () {
listener.destroy(this.handler);
return this;
};
Router.prototype.getPath = function () {
let path = window.location.pathname;
if (path.substr(0, 1) !== '/') {
path = `/${path}`;
}
return path;
};
function _every(arr, iterator) {
for (let i = 0; i < arr.length; i += 1) {
if (iterator(arr[i], i, arr) === false) {
return;
}
}
}
function _flatten(arr) {
let flat = [];
for (let i = 0, n = arr.length; i < n; i++) {
flat = flat.concat(arr[i]);
}
return flat;
}
function _asyncEverySeries(arr, iterator, callback) {
if (!arr.length) {
return callback();
}
let completed = 0;
(function iterate() {
iterator(arr[completed], function (err) {
if (err || err === false) {
callback(err);
callback = function () {};
} else {
completed += 1;
if (completed === arr.length) {
callback();
} else {
iterate();
}
}
});
})();
}
function paramifyString(str, params, mod) {
mod = str;
for (const param in params) {
if (params.hasOwnProperty(param)) {
mod = params[param](str);
if (mod !== str) {
break;
}
}
}
return mod === str ? '([._a-zA-Z0-9-%()]+)' : mod;
}
function regifyString(str, params) {
let matches;
let last = 0;
let out = '';
while ((matches = str.substr(last).match(/[^\w\d\- %@&]*\*[^\w\d\- %@&]*/))) {
last = matches.index + matches[0].length;
matches[0] = matches[0].replace(/^\*/, '([_.()!\\ %@&a-zA-Z0-9-]+)');
out += str.substr(0, matches.index) + matches[0];
}
str = out += str.substr(last);
const captures = str.match(/:([^\/]+)/gi);
let capture;
let length;
if (captures) {
length = captures.length;
for (let i = 0; i < length; i++) {
capture = captures[i];
if (capture.slice(0, 2) === '::') {
str = capture.slice(1);
} else {
str = str.replace(capture, paramifyString(capture, params));
}
}
}
return str;
}
function terminator(routes, delimiter, start, stop) {
let last = 0;
let left = 0;
let right = 0;
var start = (start || '(').toString();
var stop = (stop || ')').toString();
let i;
for (i = 0; i < routes.length; i++) {
const chunk = routes[i];
if (
chunk.indexOf(start, last) > chunk.indexOf(stop, last) ||
(~chunk.indexOf(start, last) && !~chunk.indexOf(stop, last)) ||
(!~chunk.indexOf(start, last) && ~chunk.indexOf(stop, last))
) {
left = chunk.indexOf(start, last);
right = chunk.indexOf(stop, last);
if ((~left && !~right) || (!~left && ~right)) {
const tmp = routes.slice(0, (i || 1) + 1).join(delimiter);
routes = [tmp].concat(routes.slice((i || 1) + 1));
}
last = (right > left ? right : left) + 1;
i = 0;
} else {
last = 0;
}
}
return routes;
}
const QUERY_SEPARATOR = /\?.*/;
Router.prototype.configure = function (options) {
options = options || {};
for (let i = 0; i < this.methods.length; i++) {
this._methods[this.methods[i]] = true;
}
this.recurse = options.recurse || this.recurse || false;
this.async = options.async || false;
this.delimiter = options.delimiter || '/';
this.strict = typeof options.strict === 'undefined' ? true : options.strict;
this.notfound = options.notfound;
this.resource = options.resource;
this.history = (options.html5history && this.historySupport) || false;
this.run_in_init =
this.history === true && options.run_handler_in_init !== false;
this.convert_hash_in_init =
this.history === true && options.convert_hash_in_init !== false;
this.every = {
after: options.after || null,
before: options.before || null,
on: options.on || null,
};
return this;
};
Router.prototype.param = function (token, matcher) {
if (token[0] !== ':') {
token = `:${token}`;
}
const compiled = new RegExp(token, 'g');
|
this.params[token] = function (str) {
return str.replace(compiled, matcher.source || matcher);
};
return this;
};
Router.prototype.on = Router.prototype.route = function (method, path, route) {
const self = this;
if (!route && typeof path === 'function') {
route = path;
path = method;
method = 'on';
}
if (Array.isArray(path)) {
return path.forEach(function (p) {
self.on(method, p, route);
});
}
if (path.source) {
path = path.source.replace(/\\\//gi, '/');
}
if (Array.isArray(method)) {
return method.forEach(function (m) {
self.on(m.toLowerCase(), path, route);
});
}
path = path.split(new RegExp(this.delimiter));
path = terminator(path, this.delimiter);
this.insert(method, this.scope.concat(path), route);
};
Router.prototype.path = function (path, routesFn) {
const self = this;
const { length }
|
random_line_split
|
|
junk.py
|
# In[16]:
# put in init
def findFFTloc(baseline,imageShapeAlong1Axis,wavel_lambda,plateScale,lOverD=1.):
# returns the FFT pixel locations equivalent to a certain pixel distance on the science image
# baseline: distance in physical space in the pupil plane (in m)
# imageShapeAlong1Axis: length of one side of the input image (needs to be square)
# wavel_lambda: wavelength of light (in m)
# plateScale: detector plate scale (in asec/pix)
# lOverD: option if we are interested in the circular Airy rings (values 1.22, etc.)
line_diam_pixOnScience = lOverD*(wavel_lambda*asecInRad)/(baseline*plateScale) # distance in pixels on science detector
line_diam_freq = np.divide(1.,line_diam_pixOnScience) # the corresponding frequency
line_diam_pixOnFFT_L = 0.5*imageShapeAlong1Axis - np.divide(line_diam_freq,np.divide(1.,imageShapeAlong1Axis)) # find number of 'frequency pixels', multiply by units of l/D (like 1.22), and subtract from the central x frequency pixel
line_diam_pixOnFFT_H = 0.5*imageShapeAlong1Axis + np.divide(line_diam_freq,np.divide(1.,imageShapeAlong1Axis)) # find number of 'frequency pixels', multiply by units of l/D (like 1.22), and add to the central x frequency pixel
return line_diam_pixOnFFT_L, line_diam_pixOnFFT_H # the lower and higher values around freq of zero
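# A quick usage sketch of findFFTloc. The numbers below are assumptions for
# illustration only (not taken from the data in this file): asecInRad is assumed to
# be the module-level arcsec-per-radian constant (~206264.8), and the baseline,
# wavelength, detector size and plate scale are made-up but plausible values.
# The two returned pixel positions should bracket the zero-frequency pixel at
# 0.5*imageShapeAlong1Axis symmetrically.
import numpy as np
asecInRad = 206264.8  # assumed value of the global referenced inside findFFTloc
low_px, high_px = findFFTloc(baseline=8.25, imageShapeAlong1Axis=2048,
                             wavel_lambda=3.7e-6, plateScale=0.0107)
print(low_px, high_px)  # symmetric about pixel 1024 on the 2048-pixel FFT image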
# In[17]:
def normalVector(sciImg):
## fit a plane by finding the (normalized) normal vector to the best-fit plane
# make x, y coords and flatten x, y, and z for putting into least-squares matrix equation
X,Y = np.meshgrid(np.arange(0,np.shape(sciImg)[0]),
np.arange(0,np.shape(sciImg)[1]))
Xflat_T = np.transpose(X.flatten())
Yflat_T = np.transpose(Y.flatten())
onesVec = np.ones(np.size(Xflat_T))
Zflat_T = np.transpose(sciImg.flatten())
# remove nans so we can fit planes
Xflat_T = Xflat_T[~np.isnan(Zflat_T)]
Yflat_T = Yflat_T[~np.isnan(Zflat_T)]
onesVec = onesVec[~np.isnan(Zflat_T)]
Zflat_T = Zflat_T[~np.isnan(Zflat_T)]
# For a plane ax+by+c=z, the normal vector is [a,b,c]. To find this,
# solve the matrix equation
# AC = B, where
# A are the x and y coords: [[x0,y0,1],[x1,y1,1],...[xN,yN,1]]
# C are the coefficients we want: [[a],[b],[c]]
# B is the z data: [[z0],[z1],...[zN]]
# C = A+B, where A+ is the pseudoinverse of A, i.e. A+ = ((A.T*A)^(-1))*A.T
Amatrix = np.transpose(np.concatenate(([Xflat_T],[Yflat_T],[onesVec]), axis =0))
Bmatrix = Zflat_T
# note that normVec = C
normVec = np.dot(np.dot( np.linalg.pinv(np.dot(Amatrix.transpose(), Amatrix)), Amatrix.transpose()), Bmatrix)
return normVec
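# A small self-check of the pseudoinverse plane fit above, on synthetic data only
# (not part of the pipeline): build z = a*x + b*y + c on a square grid and confirm
# that normalVector recovers [a, b, c] to floating-point precision.
import numpy as np
a_true, b_true, c_true = 0.5, -1.25, 3.0
X_chk, Y_chk = np.meshgrid(np.arange(50), np.arange(50))  # same 'xy' indexing as normalVector uses
Z_chk = a_true*X_chk + b_true*Y_chk + c_true
print(np.allclose(normalVector(Z_chk), [a_true, b_true, c_true]))  # expected: True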
# In[27]:
def fftMask(sciImg,wavel_lambda,plateScale):
# sciImg: this is actually the FFT image, not the science detector image
# wavel_lambda: wavelength of the observation
# plateScale: plate scale of the detector (asec/pixel)
# make division lines separating different parts of the PSF
line_M1diam_pixOnFFT = findFFTloc(8.25,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_center2center_pixOnFFT = findFFTloc(14.4,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_edge2edge_pixOnFFT = findFFTloc(22.65,np.shape(sciImg)[0],wavel_lambda,plateScale)
# define circles
circRad = 60 # pixels in FFT space
circle_highFreqPerfect_L = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[0], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_highFreqPerfect_R = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_lowFreqPerfect = CirclePixelRegion(center=PixCoord(x=0.5*np.shape(sciImg)[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
# define central rectangular region that includes all three nodes
rect_pix = PolygonPixelRegion(vertices=PixCoord(x=[line_edge2edge_pixOnFFT[0],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[0]],
y=[line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[0],line_M1diam_pixOnFFT[0]]))
# make the masks
mask_circHighFreq_L = circle_highFreqPerfect_L.to_mask()
mask_circHighFreq_R = circle_highFreqPerfect_R.to_mask()
mask_circLowFreq = circle_lowFreqPerfect.to_mask()
mask_rect = rect_pix.to_mask()
# apply the masks
sciImg1 = np.copy(sciImg) # initialize arrays of same size as science image
sciImg2 = np.copy(sciImg)
sciImg3 = np.copy(sciImg)
sciImg4 = np.copy(sciImg)
# region 1
sciImg1.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_L.data[mask_circHighFreq_L.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg1[mask_circHighFreq_L.bbox.slices] = mask_circHighFreq_L.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg1 = np.multiply(sciImg1,sciImg) # 'transmit' the original science image through the mask
# region 2
sciImg2.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_R.data[mask_circHighFreq_R.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg2[mask_circHighFreq_R.bbox.slices] = mask_circHighFreq_R.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg2 = np.multiply(sciImg2,sciImg) # 'transmit' the original science image through the mask
# region 3
sciImg3.fill(np.nan) # initialize arrays of nans
mask_circLowFreq.data[mask_circLowFreq.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg3[mask_circLowFreq.bbox.slices] = mask_circLowFreq.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg3 = np.multiply(sciImg3,sciImg) # 'transmit' the original science image through the mask
# region 4
sciImg4.fill(np.nan) # initialize arrays of nans
mask_rect.data[mask_rect.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg4[mask_rect.bbox.slices] = mask_rect.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg4 = np.multiply(sciImg4,sciImg) # 'transmit' the original science image through the mask
# return medians of regions under masks
med_highFreqPerfect_L = np.nanmedian(sciImg1)
med_highFreqPerfect_R = np.nanmedian(sciImg2)
med_lowFreqPerfect = np.nanmedian(sciImg3)
med_rect = np.nanmedian(sciImg4)
# return normal vectors corresponding to [x,y,z] to surfaces (x- and y- components are of interest)
normVec_highFreqPerfect_L = normalVector(sciImg1)
normVec_highFreqPerfect_R = normalVector
|
max2 = 1.635
min2 = 2.233
max3 = 2.679
min3 = 3.238
max4 = 3.699
|
random_line_split
|
|
junk.py
|
plate scale (in asec/pix)
# lOverD: option if we are interested in the circular Airy rings (values 1.22, etc.)
line_diam_pixOnScience = lOverD*(wavel_lambda*asecInRad)/(baseline*plateScale) # distance in pixels on science detector
line_diam_freq = np.divide(1.,line_diam_pixOnScience) # the corresponding frequency
line_diam_pixOnFFT_L = 0.5*imageShapeAlong1Axis - np.divide(line_diam_freq,np.divide(1.,imageShapeAlong1Axis)) # find number of 'frequency pixels', multiply by units of l/D (like 1.22), and subtract from the central x frequency pixel
line_diam_pixOnFFT_H = 0.5*imageShapeAlong1Axis + np.divide(line_diam_freq,np.divide(1.,imageShapeAlong1Axis)) # find number of 'frequency pixels', multiply by units of l/D (like 1.22), and add to the central x frequency pixel
return line_diam_pixOnFFT_L, line_diam_pixOnFFT_H # the lower and higher values around freq of zero
# In[17]:
def normalVector(sciImg):
## fit a plane by finding the (normalized) normal vector to the best-fit plane
# make x, y coords and flatten x, y, and z for putting into least-squares matrix equation
X,Y = np.meshgrid(np.arange(0,np.shape(sciImg)[0]),
np.arange(0,np.shape(sciImg)[1]))
Xflat_T = np.transpose(X.flatten())
Yflat_T = np.transpose(Y.flatten())
onesVec = np.ones(np.size(Xflat_T))
Zflat_T = np.transpose(sciImg.flatten())
# remove nans so we can fit planes
Xflat_T = Xflat_T[~np.isnan(Zflat_T)]
Yflat_T = Yflat_T[~np.isnan(Zflat_T)]
onesVec = onesVec[~np.isnan(Zflat_T)]
Zflat_T = Zflat_T[~np.isnan(Zflat_T)]
# For a plane ax+by+c=z, the normal vector is [a,b,c]. To find this,
# solve the matrix equation
# AC = B, where
# A are the x and y coords: [[x0,y0,1],[x1,y1,1],...[xN,yN,1]]
# C are the coefficients we want: [[a],[b],[c]]
# B is the z data: [[z0],[z1],...[zN]]
# C = A+B, where A+ is the pseudoinverse of A, i.e. A+ = ((A.T*A)^(-1))*A.T
Amatrix = np.transpose(np.concatenate(([Xflat_T],[Yflat_T],[onesVec]), axis =0))
Bmatrix = Zflat_T
# note that normVec = C
normVec = np.dot(np.dot( np.linalg.pinv(np.dot(Amatrix.transpose(), Amatrix)), Amatrix.transpose()), Bmatrix)
return normVec
# In[27]:
def fftMask(sciImg,wavel_lambda,plateScale):
# sciImg: this is actually the FFT image, not the science detector image
# wavel_lambda: wavelength of the observation
# plateScale: plate scale of the detector (asec/pixel)
# make division lines separating different parts of the PSF
|
# apply the masks
sciImg1 = np.copy(sciImg) # initialize arrays of same size as science image
sciImg2 = np.copy(sciImg)
sciImg3 = np.copy(sciImg)
sciImg4 = np.copy(sciImg)
# region 1
sciImg1.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_L.data[mask_circHighFreq_L.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg1[mask_circHighFreq_L.bbox.slices] = mask_circHighFreq_L.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg1 = np.multiply(sciImg1,sciImg) # 'transmit' the original science image through the mask
# region 2
sciImg2.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_R.data[mask_circHighFreq_R.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg2[mask_circHighFreq_R.bbox.slices] = mask_circHighFreq_R.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg2 = np.multiply(sciImg2,sciImg) # 'transmit' the original science image through the mask
# region 3
sciImg3.fill(np.nan) # initialize arrays of nans
mask_circLowFreq.data[mask_circLowFreq.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg3[mask_circLowFreq.bbox.slices] = mask_circLowFreq.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg3 = np.multiply(sciImg3,sciImg) # 'transmit' the original science image through the mask
# region 4
sciImg4.fill(np.nan) # initialize arrays of nans
mask_rect.data[mask_rect.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg4[mask_rect.bbox.slices] = mask_rect.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg4 = np.multiply(sciImg4,sciImg) # 'transmit' the original science image through the mask
# return medians of regions under masks
med_highFreqPerfect_L = np.nanmedian(sciImg1)
med_highFreqPerfect_R = np.nanmedian(sciImg2)
med_lowFreqPerfect = np.nanmedian(sciImg3)
med_rect = np.nanmedian(sciImg4)
# return normal vectors corresponding to [x,y,z] to surfaces (x- and y- components are of interest)
normVec_highFreqPerfect_L = normalVector(sciImg1)
normVec_highFreqPerfect_R = normalVector(sciImg2)
normVec_lowFreqPerfect = normalVector(sciImg3)
normVec_rect = normalVector(sciImg4)
# generate images showing footprints of regions of interest
# (comment this bit in/out as desired)
'''
plt.imshow(sciImg1, origin='lower')
plt.show()
plt.imshow(sciImg2, origin='lower')
plt.show()
plt.imshow(sciImg3, origin='lower')
plt.show()
plt.imshow(sciImg4, origin='lower')
plt.show()
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
cax = ax.imshow(sciImg
|
line_M1diam_pixOnFFT = findFFTloc(8.25,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_center2center_pixOnFFT = findFFTloc(14.4,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_edge2edge_pixOnFFT = findFFTloc(22.65,np.shape(sciImg)[0],wavel_lambda,plateScale)
# define circles
circRad = 60 # pixels in FFT space
circle_highFreqPerfect_L = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[0], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_highFreqPerfect_R = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_lowFreqPerfect = CirclePixelRegion(center=PixCoord(x=0.5*np.shape(sciImg)[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
# define central rectangular region that includes all three nodes
rect_pix = PolygonPixelRegion(vertices=PixCoord(x=[line_edge2edge_pixOnFFT[0],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[0]],
y=[line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[0],line_M1diam_pixOnFFT[0]]))
# make the masks
mask_circHighFreq_L = circle_highFreqPerfect_L.to_mask()
mask_circHighFreq_R = circle_highFreqPerfect_R.to_mask()
mask_circLowFreq = circle_lowFreqPerfect.to_mask()
mask_rect = rect_pix.to_mask()
|
identifier_body
|
junk.py
|
# plateScale: plate scale of the detector (asec/pixel)
# make division lines separating different parts of the PSF
line_M1diam_pixOnFFT = findFFTloc(8.25,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_center2center_pixOnFFT = findFFTloc(14.4,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_edge2edge_pixOnFFT = findFFTloc(22.65,np.shape(sciImg)[0],wavel_lambda,plateScale)
# define circles
circRad = 60 # pixels in FFT space
circle_highFreqPerfect_L = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[0], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_highFreqPerfect_R = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_lowFreqPerfect = CirclePixelRegion(center=PixCoord(x=0.5*np.shape(sciImg)[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
# define central rectangular region that includes all three nodes
rect_pix = PolygonPixelRegion(vertices=PixCoord(x=[line_edge2edge_pixOnFFT[0],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[0]],
y=[line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[0],line_M1diam_pixOnFFT[0]]))
# make the masks
mask_circHighFreq_L = circle_highFreqPerfect_L.to_mask()
mask_circHighFreq_R = circle_highFreqPerfect_R.to_mask()
mask_circLowFreq = circle_lowFreqPerfect.to_mask()
mask_rect = rect_pix.to_mask()
# apply the masks
sciImg1 = np.copy(sciImg) # initialize arrays of same size as science image
sciImg2 = np.copy(sciImg)
sciImg3 = np.copy(sciImg)
sciImg4 = np.copy(sciImg)
# region 1
sciImg1.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_L.data[mask_circHighFreq_L.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg1[mask_circHighFreq_L.bbox.slices] = mask_circHighFreq_L.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg1 = np.multiply(sciImg1,sciImg) # 'transmit' the original science image through the mask
# region 2
sciImg2.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_R.data[mask_circHighFreq_R.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg2[mask_circHighFreq_R.bbox.slices] = mask_circHighFreq_R.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg2 = np.multiply(sciImg2,sciImg) # 'transmit' the original science image through the mask
# region 3
sciImg3.fill(np.nan) # initialize arrays of nans
mask_circLowFreq.data[mask_circLowFreq.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg3[mask_circLowFreq.bbox.slices] = mask_circLowFreq.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg3 = np.multiply(sciImg3,sciImg) # 'transmit' the original science image through the mask
# region 4
sciImg4.fill(np.nan) # initialize arrays of nans
mask_rect.data[mask_rect.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg4[mask_rect.bbox.slices] = mask_rect.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg4 = np.multiply(sciImg4,sciImg) # 'transmit' the original science image through the mask
# return medians of regions under masks
med_highFreqPerfect_L = np.nanmedian(sciImg1)
med_highFreqPerfect_R = np.nanmedian(sciImg2)
med_lowFreqPerfect = np.nanmedian(sciImg3)
med_rect = np.nanmedian(sciImg4)
# return normal vectors corresponding to [x,y,z] to surfaces (x- and y- components are of interest)
normVec_highFreqPerfect_L = normalVector(sciImg1)
normVec_highFreqPerfect_R = normalVector(sciImg2)
normVec_lowFreqPerfect = normalVector(sciImg3)
normVec_rect = normalVector(sciImg4)
# generate images showing footprints of regions of interest
# (comment this bit in/out as desired)
'''
plt.imshow(sciImg1, origin='lower')
plt.show()
plt.imshow(sciImg2, origin='lower')
plt.show()
plt.imshow(sciImg3, origin='lower')
plt.show()
plt.imshow(sciImg4, origin='lower')
plt.show()
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
cax = ax.imshow(sciImg, origin="lower")
ax.axhline(line_M1diam_pixOnFFT[0])
ax.axhline(line_M1diam_pixOnFFT[1])
ax.axvline(line_M1diam_pixOnFFT[0])
ax.axvline(line_M1diam_pixOnFFT[1])
ax.axvline(line_center2center_pixOnFFT[0])
ax.axvline(line_center2center_pixOnFFT[1])
ax.axvline(line_edge2edge_pixOnFFT[0])
ax.axvline(line_edge2edge_pixOnFFT[1])
ax.add_patch(circle_highFreqPerfect_L.as_patch(facecolor='none', edgecolor='blue'))
ax.add_patch(circle_highFreqPerfect_R.as_patch(facecolor='none', edgecolor='blue'))
ax.add_patch(circle_lowFreqPerfect.as_patch(facecolor='none', edgecolor='blue'))
ax.add_patch(rect_pix.as_patch(facecolor='none', edgecolor='red'))
cbar = fig.colorbar(cax)
plt.savefig("junk.pdf")
'''
dictFFTstuff = {}
dictFFTstuff["med_highFreqPerfect_L"] = med_highFreqPerfect_L
dictFFTstuff["med_highFreqPerfect_R"] = med_highFreqPerfect_R
dictFFTstuff["med_lowFreqPerfect"] = med_lowFreqPerfect
dictFFTstuff["med_rect"] = med_rect
# note vectors are [a,b,c] corresponding to the eqn Z = a*X + b*Y + c
dictFFTstuff["normVec_highFreqPerfect_L"] = normVec_highFreqPerfect_L
dictFFTstuff["normVec_highFreqPerfect_R"] = normVec_highFreqPerfect_R
dictFFTstuff["normVec_lowFreqPerfect"] = normVec_lowFreqPerfect
dictFFTstuff["normVec_rect"] = normVec_rect
return dictFFTstuff
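# A toy illustration (synthetic arrays only, not instrument data) of the masking
# trick used for the four regions above: zeros inside the mask cutout become NaN,
# the cutout of ones is placed on an all-NaN canvas, and multiplying by the image
# 'transmits' only the pixels under the mask, so np.nanmedian ignores the rest.
import numpy as np
img_demo = np.arange(36, dtype=float).reshape(6, 6)
canvas = np.full_like(img_demo, np.nan)
cutout = np.zeros((3, 3))
cutout[1, :] = 1.0                      # toy 3x3 footprint: only its middle row is 'on'
cutout[cutout == 0] = np.nan            # zeros inside the cutout -> NaN
canvas[0:3, 0:3] = cutout               # place the cutout onto the NaN canvas
region = np.multiply(canvas, img_demo)  # 'transmit' the image through the mask
print(np.nanmedian(region))             # 7.0, the median of img_demo[1, 0:3]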
# In[28]:
# for loop over science images to take FFT and analyze it
ampArray = []
framenumArray = []
for f in range(4249,11497): # full Altair dataset: 4249,11497
|
filename_str = stem+'lm_180507_'+str("{:0>6d}".format(f))+'.fits'
if os.path.isfile(filename_str): # if FITS file exists in the first place
print('Working on frame '+str("{:0>6d}".format(f))+' ...')
image, header = fits.getdata(filename_str,0,header=True)
# test: a perfect PSF
#image, header = fits.getdata(stem+'perfect_psf.fits',0,header=True)
# locate PSF
psf_loc = overlap_psfs.find_airy_psf(image)
# size of cookie cut-out (measured center-to-edge)
cookie_size = 100 # maximum control radius as of 2018 July corresponds to 130.0 pixels
# take FFT
cookie_cut = image[psf_loc[0]-cookie_size:psf_loc[0]+cookie_size,psf_loc[1]-cookie_size:psf_loc[1]+cookie_size]
amp, arg = fft_img(cookie_cut).fft(padding=int(5*cookie_size), mask_thresh=1e5)
|
conditional_block
|
|
junk.py
|
plate scale (in asec/pix)
# lOverD: option if we are interested in the circular Airy rings (values 1.22, etc.)
line_diam_pixOnScience = lOverD*(wavel_lambda*asecInRad)/(baseline*plateScale) # distance in pixels on science detector
line_diam_freq = np.divide(1.,line_diam_pixOnScience) # the corresponding frequency
line_diam_pixOnFFT_L = 0.5*imageShapeAlong1Axis - np.divide(line_diam_freq,np.divide(1.,imageShapeAlong1Axis)) # find number of 'frequency pixels', multiply by units of l/D (like 1.22), and subtract from the central x frequency pixel
line_diam_pixOnFFT_H = 0.5*imageShapeAlong1Axis + np.divide(line_diam_freq,np.divide(1.,imageShapeAlong1Axis)) # find number of 'frequency pixels', multiply by units of l/D (like 1.22), and add to the central x frequency pixel
return line_diam_pixOnFFT_L, line_diam_pixOnFFT_H # the lower and higher values around freq of zero
# In[17]:
def normalVector(sciImg):
## fit a plane by finding the (normalized) normal vector to the best-fit plane
# make x, y coords and flatten x, y, and z for putting into least-squares matrix equation
X,Y = np.meshgrid(np.arange(0,np.shape(sciImg)[0]),
np.arange(0,np.shape(sciImg)[1]))
Xflat_T = np.transpose(X.flatten())
Yflat_T = np.transpose(Y.flatten())
onesVec = np.ones(np.size(Xflat_T))
Zflat_T = np.transpose(sciImg.flatten())
# remove nans so we can fit planes
Xflat_T = Xflat_T[~np.isnan(Zflat_T)]
Yflat_T = Yflat_T[~np.isnan(Zflat_T)]
onesVec = onesVec[~np.isnan(Zflat_T)]
Zflat_T = Zflat_T[~np.isnan(Zflat_T)]
# For a plane ax+by+c=z, the normal vector is [a,b,c]. To find this,
# solve the matrix equation
# AC = B, where
# A are the x and y coords: [[x0,y0,1],[x1,y1,1],...[xN,yN,1]]
# C are the coefficients we want: [[a],[b],[c]]
# B is the z data: [[z0],[z1],...[zN]]
# C = A+B, where A+ is the pseudoinverse of A, i.e. A+ = ((A.T*A)^(-1))*A.T
Amatrix = np.transpose(np.concatenate(([Xflat_T],[Yflat_T],[onesVec]), axis =0))
Bmatrix = Zflat_T
# note that normVec = C
normVec = np.dot(np.dot( np.linalg.pinv(np.dot(Amatrix.transpose(), Amatrix)), Amatrix.transpose()), Bmatrix)
return normVec
# In[27]:
def
|
(sciImg,wavel_lambda,plateScale):
# sciImg: this is actually the FFT image, not the science detector image
# wavel_lambda: wavelength of the observation
# plateScale: plate scale of the detector (asec/pixel)
# make division lines separating different parts of the PSF
line_M1diam_pixOnFFT = findFFTloc(8.25,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_center2center_pixOnFFT = findFFTloc(14.4,np.shape(sciImg)[0],wavel_lambda,plateScale)
line_edge2edge_pixOnFFT = findFFTloc(22.65,np.shape(sciImg)[0],wavel_lambda,plateScale)
# define circles
circRad = 60 # pixels in FFT space
circle_highFreqPerfect_L = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[0], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_highFreqPerfect_R = CirclePixelRegion(center=PixCoord(x=line_center2center_pixOnFFT[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
circle_lowFreqPerfect = CirclePixelRegion(center=PixCoord(x=0.5*np.shape(sciImg)[1], y=0.5*np.shape(sciImg)[0]), radius=circRad)
# define central rectangular region that includes all three nodes
rect_pix = PolygonPixelRegion(vertices=PixCoord(x=[line_edge2edge_pixOnFFT[0],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[1],line_edge2edge_pixOnFFT[0]],
y=[line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[1],line_M1diam_pixOnFFT[0],line_M1diam_pixOnFFT[0]]))
# make the masks
mask_circHighFreq_L = circle_highFreqPerfect_L.to_mask()
mask_circHighFreq_R = circle_highFreqPerfect_R.to_mask()
mask_circLowFreq = circle_lowFreqPerfect.to_mask()
mask_rect = rect_pix.to_mask()
# apply the masks
sciImg1 = np.copy(sciImg) # initialize arrays of same size as science image
sciImg2 = np.copy(sciImg)
sciImg3 = np.copy(sciImg)
sciImg4 = np.copy(sciImg)
# region 1
sciImg1.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_L.data[mask_circHighFreq_L.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg1[mask_circHighFreq_L.bbox.slices] = mask_circHighFreq_L.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg1 = np.multiply(sciImg1,sciImg) # 'transmit' the original science image through the mask
# region 2
sciImg2.fill(np.nan) # initialize arrays of nans
mask_circHighFreq_R.data[mask_circHighFreq_R.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg2[mask_circHighFreq_R.bbox.slices] = mask_circHighFreq_R.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg2 = np.multiply(sciImg2,sciImg) # 'transmit' the original science image through the mask
# region 3
sciImg3.fill(np.nan) # initialize arrays of nans
mask_circLowFreq.data[mask_circLowFreq.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg3[mask_circLowFreq.bbox.slices] = mask_circLowFreq.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg3 = np.multiply(sciImg3,sciImg) # 'transmit' the original science image through the mask
# region 4
sciImg4.fill(np.nan) # initialize arrays of nans
mask_rect.data[mask_rect.data == 0] = np.nan # make zeros within mask cutout (but not in the mask itself) nans
sciImg4[mask_rect.bbox.slices] = mask_rect.data # place the mask cutout (consisting only of 1s) onto the array of nans
sciImg4 = np.multiply(sciImg4,sciImg) # 'transmit' the original science image through the mask
# return medians of regions under masks
med_highFreqPerfect_L = np.nanmedian(sciImg1)
med_highFreqPerfect_R = np.nanmedian(sciImg2)
med_lowFreqPerfect = np.nanmedian(sciImg3)
med_rect = np.nanmedian(sciImg4)
# return normal vectors corresponding to [x,y,z] to surfaces (x- and y- components are of interest)
normVec_highFreqPerfect_L = normalVector(sciImg1)
normVec_highFreqPerfect_R = normalVector(sciImg2)
normVec_lowFreqPerfect = normalVector(sciImg3)
normVec_rect = normalVector(sciImg4)
# generate images showing footprints of regions of interest
# (comment this bit in/out as desired)
'''
plt.imshow(sciImg1, origin='lower')
plt.show()
plt.imshow(sciImg2, origin='lower')
plt.show()
plt.imshow(sciImg3, origin='lower')
plt.show()
plt.imshow(sciImg4, origin='lower')
plt.show()
plt.clf()
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
cax = ax.imshow(sciImg
|
fftMask
|
identifier_name
|
dropdown.component.ts
|
boolean = false;
// Dropdown menu settings
protected _dropdownSettings: DropdownSettings = new DropdownSettings();
public _currentDropdownSettings: DropdownSettings = new DropdownSettings();
// Available dropdown options
protected _dropdownOptions: Array<any> = [];
// Currently selected options
protected _selectedOptions: Array<any> = [];
// Prototype option models
protected _optionModelArr: Array<any>;
public getCalcHeight: number = 0;
@Input('optionModelArr') set optionModelArr(data: Array<any>) {
this._optionModelArr = data;
};
public optionInit: boolean = false;
public selectInit: boolean = false;
public reset: boolean = false;
public resetDropdown: boolean = false;
public resetOption: boolean = false;
public resetSettings: boolean = false;
public selectWarp: any;
public currClass: string = 'se-input-current';
/**
* setParams
* @param param
*/
@Input() set setParams(param: any) {
//this.reset = param;
}
@Input() set calcWindowHeight(height: number) {
if (height) {
this.getCalcHeight = height;
}
}
get optionModelArr() {
return this._optionModelArr;
}
@Output('optionModelArrChange') optionModelArrChange = new EventEmitter<any>();
@ViewChild('toggleInput') toggleInput: ElementRef;
@ViewChild('dropdownInput') dropdownInputComponent: DropdownInputComponent;
@ViewChild('dropdownSelect') dropdownSelectComponent: DropdownSelectComponent;
@ViewChild('toggleSelect') toggleSelect: ElementRef;
@Input('dropdownSettings') set dropdownSettings(data: DropdownSettings) {
if (data) {
this._currentDropdownSettings = data;
}
if (data && !this.resetDropdown) {
for (let key in data) {
if (this.dropdownSettings.hasOwnProperty(key) && data.hasOwnProperty(key)) {
this.dropdownSettings[key] = data[key];
}
}
this.resetDropdown = false;
}
};
get dropdownSettings() {
return this._dropdownSettings;
}
@Input('dropdownOptions') set dropdownOptions(data: Array<any>) {
this.optionInit = true;
this._dropdownOptions = data;
};
get dropdownOptions() {
return this._dropdownOptions;
}
@Input('selectedOptions') set selectedOptions(data: Array<any>) {
if (data.length && this._dropdownOptions.length) {
this._selectedOptions = data;
for (let j in this._selectedOptions) {
let selectedEle = this.typeService.clone(this._selectedOptions[j]);
let flag: boolean = false;
for (let i in this._dropdownOptions) {
let ele = this._dropdownOptions[i];
if (ele.id == selectedEle.id) {
flag = true;
this._dropdownOptions[i].isCurrent = true;
selectedEle = ele;
// // For selected values, fields other than id are allowed to default to empty for now
// if (ele.hasOwnProperty('group') && ele.group && selectedEle.hasOwnProperty('group') && !selectedEle.group) {
// selectedEle.group = ele.group;
// }
// if (ele.hasOwnProperty('key') && ele.key && selectedEle.hasOwnProperty('key') && !selectedEle.key) {
// selectedEle.key = ele.key;
// }
// if (ele.hasOwnProperty('label') && ele.label && selectedEle.hasOwnProperty('label') && !selectedEle.label) {
// selectedEle.label = ele.label;
// }
// if (ele.hasOwnProperty('imageLabel') && ele.imageLabel && selectedEle.hasOwnProperty('imageLabel') && !selectedEle.imageLabel) {
// selectedEle.imageLabel = ele.imageLabel;
// }
// if (ele.hasOwnProperty('desc') && ele.desc && selectedEle.hasOwnProperty('desc') && !selectedEle.desc) {
// selectedEle.desc = ele.desc;
// }
}
}
if (!flag && (selectedEle.label == '' && selectedEle.key == '')) {
selectedEle.label = this.translateService.manualTranslate('Not Found');
}
this._selectedOptions[j] = this.typeService.clone(selectedEle);
}
if (this.reset || this.optionInit) {
this.dropdownInputComponent.selectedOptions = this._selectedOptions;
}
if (this.reset || this.optionInit) {
this.dropdownSelectComponent.selectedOptions = this._selectedOptions;
}
if (this.reset) {
this.reset = false;
}
} else {
this._selectedOptions
= this.dropdownInputComponent.selectedOptions
= this.dropdownSelectComponent.selectedOptions = [];
}
}
get selectedOptions() {
return this._selectedOptions;
}
ngOnChanges(changes: SimpleChanges): void {
let log: string[] = [];
for (let propName in changes) {
let changedProp = changes[propName];
let to = JSON.stringify(changedProp.currentValue);
if (propName === 'selectedOptions') {
this.reset = true;
}
if (propName === 'dropdownOptions') {
this.resetDropdown = true;
this.dropdownSelectComponent.openStatus = false;
}
if (changedProp.isFirstChange()) {
log.push(`Initial value of ${propName} set to ${to}`);
} else {
let from = JSON.stringify(changedProp.previousValue);
log.push(`${propName} changed from ${from} to ${to}`);
if (propName === 'selectedOptions') {
}
if (propName === 'dropdownOptions') {
this.resetOption = true;
this.toggleOptionsChange(changedProp.currentValue);
}
if (propName === 'dropdownSettings') {
this.resetSettings = true;
this.toggleSettingsChange(changedProp.currentValue);
}
}
}
//.log('dropdown component .changeLog', log);
}
ngAfterViewChecked(): void {
if (!this.hasInit && typeof this.toggleInput !== 'undefined') {
this.hasInit = true;
}
}
/**
* The input box triggers display of the dropdown options
* @param event
*/
toggleDropdownEvent(event: any) {
// event.stopPropagation();
//For the input box, open the dropdown menu on the first click
if (event.target.tagName !== 'INPUT') {
this.toggleDropdownSelectStatus();
} else {
if (!this.dropdownSelectComponent.openStatus) {
this.toggleDropdownSelectStatus();
}
}
}
updateOptionModelArr(data?: any) {
let changedOptions = [];
let changeStatus = '';
if (data) {
changedOptions = data[0]; //
changeStatus = data[1]; // add , delete
}
this.optionModelArrChange.emit([this.selectedOptions, changedOptions, changeStatus]);
}
/**
* Reassign the values previously selected in the select back to the input and the parent module
*/
toggleSelectedOptionsEvent(data?: any) {
this.dropdownInputComponent.selectedOptions = this.selectedOptions = this.dropdownSelectComponent.selectedOptions;
this.updateOptionModelArr(data);
}
/**
* Reassign the content previously removed from the input back to the input and the parent module
*/
removeSelectedOptionsEvent(data?: any) {
this.dropdownSelectComponent.selectedOptions = this.selectedOptions = this.dropdownInputComponent.selectedOptions;
let ele = data[0];
for (let i in this.dropdownSelectComponent.dropdownOptions) {
if (this.dropdownSelectComponent.dropdownOptions.hasOwnProperty(i)
&& this.dropdownSelectComponent.dropdownOptions[i].key == ele.key) {
this.dropdownSelectComponent.dropdownOptions[i].isCurrent = false;
}
}
this.dropdownOptions = this.dropdownSelectComponent.dropdownOptions;
this.updateOptionModelArr(data);
}
/**
* Controls the dropdown menu display style
*/
toggleDropdownSelectStatus() {
if (!this.dropdownSelectComponent.openStatus) {
//TODO: control the height dynamically in JS to achieve a transition effect;
this.dropdownSelectComponent.autoHeight = 'auto';
}
this.dropdownSelectComponent.openStatus = !this.dropdownSelectComponent.openStatus;
this.renderer.setElementClass(this.toggleSelect.nativeElement, 'hide', false);
}
/**
* The input module triggers a search of the select contents
* todo: support remote search
* @param data
*/
triggerSearchAction(data: any) {
if (typeof data !== 'undefined') {
let searchText = data[0];
if (searchText !== '') {
this.toggleDropdownSelectStatus();
this.dropdownSelectComponent.filterDropdown(searchText);
} else {
this.dropdownSelectComponent.resetFilterDropdown();
}
}
}
toggleSettingsChange(settings: any) {
if (this.resetSettings && settings) {
//let settings = new DropdownSettings(data);
this.dropdownSelectComponent.settings = settings;
this.dropdownInputComponent.settings = settings;
this.resetSettings = false;
}
}
toggleOptionsChange(data: any) {
if (this.resetOption && data) {
this._dropdownOptions = data;
this.dropdownSelectComponent.dropdownOptions = data;
this.resetOption = false;
}
}
@Output() doCloseDropDown = new EventEmitter<any>();
closeOptionDropdown(data?: any) {
|
this.renderer.setElementClass(this.selectWarp.toggleSelectElement, 'hide', true);
this.renderer.setElementClass(this.selectWarp.toggleInput, 'se-input-current', false);
|
identifier_body
|
|
dropdown.component.ts
|
.component";
import {DropdownSelectComponent} from "./dropdown-select.component";
import {DropdownOptionModel} from "../dropdown-element";
@Component({
selector: 'dropdown-search',
templateUrl: './../template/dropdown.component.html',
encapsulation: ViewEncapsulation.None
})
export class DropdownComponent implements AfterViewChecked, OnChanges, OnInit {
ngOnInit(): void {
}
changeLog: string[] = [];
constructor(private renderer: Renderer,
@Inject('type.service') public typeService: any,
@Inject('bi-translate.service') public translateService: any,
@Inject('user-data.service') public userDataService: any,
@Inject('toggle-select.service') public toggleSelectService: any) {
}
protected hasInit: boolean = false;
// Dropdown menu settings
protected _dropdownSettings: DropdownSettings = new DropdownSettings();
public _currentDropdownSettings: DropdownSettings = new DropdownSettings();
// Available dropdown options
protected _dropdownOptions: Array<any> = [];
// Currently selected options
protected _selectedOptions: Array<any> = [];
// Prototype option models
protected _optionModelArr: Array<any>;
public getCalcHeight: number = 0;
@Input('optionModelArr') set optionModelArr(data: Array<any>) {
this._optionModelArr = data;
};
public optionInit: boolean = false;
public selectInit: boolean = false;
public reset: boolean = false;
public resetDropdown: boolean = false;
public resetOption: boolean = false;
public resetSettings: boolean = false;
public selectWarp: any;
public currClass: string = 'se-input-current';
/**
* setParams
* @param param
*/
@Input() set setParams(param: any) {
//this.reset = param;
}
@Input() set calcWindowHeight(height: number) {
if (height) {
this.getCalcHeight = height;
}
}
get optionModelArr() {
return this._optionModelArr;
}
@Output('optionModelArrChange') optionModelArrChange = new EventEmitter<any>();
@ViewChild('toggleInput') toggleInput: ElementRef;
@ViewChild('dropdownInput') dropdownInputComponent: DropdownInputComponent;
@ViewChild('dropdownSelect') dropdownSelectComponent: DropdownSelectComponent;
|
}
if (data && !this.resetDropdown) {
for (let key in data) {
if (this.dropdownSettings.hasOwnProperty(key) && data.hasOwnProperty(key)) {
this.dropdownSettings[key] = data[key];
}
}
this.resetDropdown = false;
}
};
get dropdownSettings() {
return this._dropdownSettings;
}
@Input('dropdownOptions') set dropdownOptions(data: Array<any>) {
this.optionInit = true;
this._dropdownOptions = data;
};
get dropdownOptions() {
return this._dropdownOptions;
}
@Input('selectedOptions') set selectedOptions(data: Array<any>) {
if (data.length && this._dropdownOptions.length) {
this._selectedOptions = data;
for (let j in this._selectedOptions) {
let selectedEle = this.typeService.clone(this._selectedOptions[j]);
let flag: boolean = false;
for (let i in this._dropdownOptions) {
let ele = this._dropdownOptions[i];
if (ele.id == selectedEle.id) {
flag = true;
this._dropdownOptions[i].isCurrent = true;
selectedEle = ele;
// // For selected values, fields other than id are allowed to default to empty for now
// if (ele.hasOwnProperty('group') && ele.group && selectedEle.hasOwnProperty('group') && !selectedEle.group) {
// selectedEle.group = ele.group;
// }
// if (ele.hasOwnProperty('key') && ele.key && selectedEle.hasOwnProperty('key') && !selectedEle.key) {
// selectedEle.key = ele.key;
// }
// if (ele.hasOwnProperty('label') && ele.label && selectedEle.hasOwnProperty('label') && !selectedEle.label) {
// selectedEle.label = ele.label;
// }
// if (ele.hasOwnProperty('imageLabel') && ele.imageLabel && selectedEle.hasOwnProperty('imageLabel') && !selectedEle.imageLabel) {
// selectedEle.imageLabel = ele.imageLabel;
// }
// if (ele.hasOwnProperty('desc') && ele.desc && selectedEle.hasOwnProperty('desc') && !selectedEle.desc) {
// selectedEle.desc = ele.desc;
// }
}
}
if (!flag && (selectedEle.label == '' && selectedEle.key == '')) {
selectedEle.label = this.translateService.manualTranslate('Not Found');
}
this._selectedOptions[j] = this.typeService.clone(selectedEle);
}
if (this.reset || this.optionInit) {
this.dropdownInputComponent.selectedOptions = this._selectedOptions;
}
if (this.reset || this.optionInit) {
this.dropdownSelectComponent.selectedOptions = this._selectedOptions;
}
if (this.reset) {
this.reset = false;
}
} else {
this._selectedOptions
= this.dropdownInputComponent.selectedOptions
= this.dropdownSelectComponent.selectedOptions = [];
}
}
get selectedOptions() {
return this._selectedOptions;
}
ngOnChanges(changes: SimpleChanges): void {
let log: string[] = [];
for (let propName in changes) {
let changedProp = changes[propName];
let to = JSON.stringify(changedProp.currentValue);
if (propName === 'selectedOptions') {
this.reset = true;
}
if (propName === 'dropdownOptions') {
this.resetDropdown = true;
this.dropdownSelectComponent.openStatus = false;
}
if (changedProp.isFirstChange()) {
log.push(`Initial value of ${propName} set to ${to}`);
} else {
let from = JSON.stringify(changedProp.previousValue);
log.push(`${propName} changed from ${from} to ${to}`);
if (propName === 'selectedOptions') {
}
if (propName === 'dropdownOptions') {
this.resetOption = true;
this.toggleOptionsChange(changedProp.currentValue);
}
if (propName === 'dropdownSettings') {
this.resetSettings = true;
this.toggleSettingsChange(changedProp.currentValue);
}
}
}
//.log('dropdown component .changeLog', log);
}
ngAfterViewChecked(): void {
if (!this.hasInit && typeof this.toggleInput !== 'undefined') {
this.hasInit = true;
}
}
/**
* The input box triggers display of the dropdown options
* @param event
*/
toggleDropdownEvent(event: any) {
// event.stopPropagation();
//For the input box, open the dropdown menu on the first click
if (event.target.tagName !== 'INPUT') {
this.toggleDropdownSelectStatus();
} else {
if (!this.dropdownSelectComponent.openStatus) {
this.toggleDropdownSelectStatus();
}
}
}
updateOptionModelArr(data?: any) {
let changedOptions = [];
let changeStatus = '';
if (data) {
changedOptions = data[0]; //
changeStatus = data[1]; // add , delete
}
this.optionModelArrChange.emit([this.selectedOptions, changedOptions, changeStatus]);
}
/**
* Reassign the values previously selected in the select back to the input and the parent module
*/
toggleSelectedOptionsEvent(data?: any) {
this.dropdownInputComponent.selectedOptions = this.selectedOptions = this.dropdownSelectComponent.selectedOptions;
this.updateOptionModelArr(data);
}
/**
* Reassign the content previously removed from the input back to the input and the parent module
*/
removeSelectedOptionsEvent(data?: any) {
this.dropdownSelectComponent.selectedOptions = this.selectedOptions = this.dropdownInputComponent.selectedOptions;
let ele = data[0];
for (let i in this.dropdownSelectComponent.dropdownOptions) {
if (this.dropdownSelectComponent.dropdownOptions.hasOwnProperty(i)
&& this.dropdownSelectComponent.dropdownOptions[i].key == ele.key) {
this.dropdownSelectComponent.dropdownOptions[i].isCurrent = false;
}
}
this.dropdownOptions = this.dropdownSelectComponent.dropdownOptions;
this.updateOptionModelArr(data);
}
/**
* Controls the dropdown menu display style
*/
toggleDropdownSelectStatus() {
if (!this.dropdownSelectComponent.openStatus) {
//TODO: control the height dynamically in JS to achieve a transition effect;
this.dropdownSelectComponent.autoHeight = 'auto';
}
this.dropdownSelectComponent.openStatus = !this.dropdownSelectComponent.openStatus;
this.renderer.setElementClass(this.toggleSelect.nativeElement, 'hide', false);
}
/**
* The input module triggers a search of the select contents
* todo: support remote search
* @param data
*/
triggerSearchAction(data: any) {
if (typeof data !== 'undefined') {
let searchText = data[0];
if (searchText !== '') {
this.toggleDropdownSelectStatus();
this.dropdownSelectComponent.filterDropdown(searchText);
} else {
this.dropdownSelectComponent.resetFilterDropdown();
}
}
}
toggleSettingsChange(settings: any) {
if (this
|
@ViewChild('toggleSelect') toggleSelect: ElementRef;
@Input('dropdownSettings') set dropdownSettings(data: DropdownSettings) {
if (data) {
this._currentDropdownSettings = data;
|
random_line_split
|
dropdown.component.ts
|
.component";
import {DropdownSelectComponent} from "./dropdown-select.component";
import {DropdownOptionModel} from "../dropdown-element";
@Component({
selector: 'dropdown-search',
templateUrl: './../template/dropdown.component.html',
encapsulation: ViewEncapsulation.None
})
export class DropdownComponent implements AfterViewChecked, OnChanges, OnInit {
ngOnInit(): void {
}
changeLog: string[] = [];
constructor(private renderer: Renderer,
@Inject('type.service') public typeService: any,
@Inject('bi-translate.service') public translateService: any,
@Inject('user-data.service') public userDataService: any,
@Inject('toggle-select.service') public toggleSelectService: any) {
}
protected hasInit: boolean = false;
// Dropdown menu settings
protected _dropdownSettings: DropdownSettings = new DropdownSettings();
public _currentDropdownSettings: DropdownSettings = new DropdownSettings();
// Available dropdown options
protected _dropdownOptions: Array<any> = [];
// Currently selected options
protected _selectedOptions: Array<any> = [];
// Prototype option models
protected _optionModelArr: Array<any>;
public getCalcHeight: number = 0;
@Input('optionModelArr') set optionModelArr(data: Array<any>) {
this._optionModelArr = data;
};
public optionInit: boolean = false;
public selectInit: boolean = false;
public reset: boolean = false;
public resetDropdown: boolean = false;
public resetOption: boolean = false;
public resetSettings: boolean = false;
public selectWarp: any;
public currClass: string = 'se-input-current';
/**
* setParams
* @param param
*/
@Input() set setParams(param: any) {
//this.reset = param;
}
@Input() set calcWindowHeight(height: number) {
|
this.getCalcHeight = height;
}
}
get optionModelArr() {
return this._optionModelArr;
}
@Output('optionModelArrChange') optionModelArrChange = new EventEmitter<any>();
@ViewChild('toggleInput') toggleInput: ElementRef;
@ViewChild('dropdownInput') dropdownInputComponent: DropdownInputComponent;
@ViewChild('dropdownSelect') dropdownSelectComponent: DropdownSelectComponent;
@ViewChild('toggleSelect') toggleSelect: ElementRef;
@Input('dropdownSettings') set dropdownSettings(data: DropdownSettings) {
if (data) {
this._currentDropdownSettings = data;
}
if (data && !this.resetDropdown) {
for (let key in data) {
if (this.dropdownSettings.hasOwnProperty(key) && data.hasOwnProperty(key)) {
this.dropdownSettings[key] = data[key];
}
}
this.resetDropdown = false;
}
};
get dropdownSettings() {
return this._dropdownSettings;
}
@Input('dropdownOptions') set dropdownOptions(data: Array<any>) {
this.optionInit = true;
this._dropdownOptions = data;
};
get dropdownOptions() {
return this._dropdownOptions;
}
@Input('selectedOptions') set selectedOptions(data: Array<any>) {
if (data.length && this._dropdownOptions.length) {
this._selectedOptions = data;
for (let j in this._selectedOptions) {
let selectedEle = this.typeService.clone(this._selectedOptions[j]);
let flag: boolean = false;
for (let i in this._dropdownOptions) {
let ele = this._dropdownOptions[i];
if (ele.id == selectedEle.id) {
flag = true;
this._dropdownOptions[i].isCurrent = true;
selectedEle = ele;
// // For selected values, fields other than id are allowed to default to empty for now
// if (ele.hasOwnProperty('group') && ele.group && selectedEle.hasOwnProperty('group') && !selectedEle.group) {
// selectedEle.group = ele.group;
// }
// if (ele.hasOwnProperty('key') && ele.key && selectedEle.hasOwnProperty('key') && !selectedEle.key) {
// selectedEle.key = ele.key;
// }
// if (ele.hasOwnProperty('label') && ele.label && selectedEle.hasOwnProperty('label') && !selectedEle.label) {
// selectedEle.label = ele.label;
// }
// if (ele.hasOwnProperty('imageLabel') && ele.imageLabel && selectedEle.hasOwnProperty('imageLabel') && !selectedEle.imageLabel) {
// selectedEle.imageLabel = ele.imageLabel;
// }
// if (ele.hasOwnProperty('desc') && ele.desc && selectedEle.hasOwnProperty('desc') && !selectedEle.desc) {
// selectedEle.desc = ele.desc;
// }
}
}
if (!flag && (selectedEle.label == '' && selectedEle.key == '')) {
selectedEle.label = this.translateService.manualTranslate('Not Found');
}
this._selectedOptions[j] = this.typeService.clone(selectedEle);
}
if (this.reset || this.optionInit) {
this.dropdownInputComponent.selectedOptions = this._selectedOptions;
this.dropdownSelectComponent.selectedOptions = this._selectedOptions;
}
if (this.reset) {
this.reset = false;
}
} else {
this._selectedOptions
= this.dropdownInputComponent.selectedOptions
= this.dropdownSelectComponent.selectedOptions = [];
}
}
get selectedOptions() {
return this._selectedOptions;
}
ngOnChanges(changes: SimpleChanges): void {
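// Log each changed @Input and flag the corresponding reset so the child input/select components are refreshed below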
let log: string[] = [];
for (let propName in changes) {
let changedProp = changes[propName];
let to = JSON.stringify(changedProp.currentValue);
if (propName === 'selectedOptions') {
this.reset = true;
}
if (propName === 'dropdownOptions') {
this.resetDropdown = true;
this.dropdownSelectComponent.openStatus = false;
}
if (changedProp.isFirstChange()) {
log.push(`Initial value of ${propName} set to ${to}`);
} else {
let from = JSON.stringify(changedProp.previousValue);
log.push(`${propName} changed from ${from} to ${to}`);
if (propName === 'selectedOptions') {
}
if (propName === 'dropdownOptions') {
this.resetOption = true;
this.toggleOptionsChange(changedProp.currentValue);
}
if (propName === 'dropdownSettings') {
this.resetSettings = true;
this.toggleSettingsChange(changedProp.currentValue);
}
}
}
// console.log('dropdown component changeLog', log);
}
ngAfterViewChecked(): void {
if (!this.hasInit && typeof this.toggleInput !== 'undefined') {
this.hasInit = true;
}
}
/**
* The input box triggers display of the dropdown options
* @param event
*/
toggleDropdownEvent(event: any) {
// event.stopPropagation();
// For the input box, only open the dropdown menu on the first click
if (event.target.tagName !== 'INPUT') {
this.toggleDropdownSelectStatus();
} else {
if (!this.dropdownSelectComponent.openStatus) {
this.toggleDropdownSelectStatus();
}
}
}
updateOptionModelArr(data?: any) {
let changedOptions = [];
let changeStatus = '';
if (data) {
changedOptions = data[0]; // the option objects that were changed
changeStatus = data[1]; // change type: 'add' or 'delete'
}
this.optionModelArrChange.emit([this.selectedOptions, changedOptions, changeStatus]);
}
/**
* Push the options selected in the select panel back to the input and the parent module
*/
toggleSelectedOptionsEvent(data?: any) {
this.dropdownInputComponent.selectedOptions = this.selectedOptions = this.dropdownSelectComponent.selectedOptions;
this.updateOptionModelArr(data);
}
/**
* Propagate the option removed from the input back to the select panel and the parent module
*/
removeSelectedOptionsEvent(data?: any) {
this.dropdownSelectComponent.selectedOptions = this.selectedOptions = this.dropdownInputComponent.selectedOptions;
let ele = data[0];
for (let i in this.dropdownSelectComponent.dropdownOptions) {
if (this.dropdownSelectComponent.dropdownOptions.hasOwnProperty(i)
&& this.dropdownSelectComponent.dropdownOptions[i].key == ele.key) {
this.dropdownSelectComponent.dropdownOptions[i].isCurrent = false;
}
}
this.dropdownOptions = this.dropdownSelectComponent.dropdownOptions;
this.updateOptionModelArr(data);
}
/**
* Controls the dropdown menu's visibility and styling
*/
toggleDropdownSelectStatus() {
if (!this.dropdownSelectComponent.openStatus) {
//TODO: control the height dynamically in JS to get a smooth transition effect;
this.dropdownSelectComponent.autoHeight = 'auto';
}
this.dropdownSelectComponent.openStatus = !this.dropdownSelectComponent.openStatus;
this.renderer.setElementClass(this.toggleSelect.nativeElement, 'hide', false);
}
/**
* The input module triggers a search over the select panel's contents
* todo: support remote search
* @param data
*/
triggerSearchAction(data: any) {
if (typeof data !== 'undefined') {
let searchText = data[0];
if (searchText !== '') {
this.toggleDropdownSelectStatus();
this.dropdownSelectComponent.filterDropdown(searchText);
} else {
this.dropdownSelectComponent.resetFilterDropdown();
}
}
}
toggleSettingsChange(settings: any) {
if (
|
if (height) {
|
identifier_name
|
dropdown.component.ts
|
.component";
import {DropdownSelectComponent} from "./dropdown-select.component";
import {DropdownOptionModel} from "../dropdown-element";
@Component({
selector: 'dropdown-search',
templateUrl: './../template/dropdown.component.html',
encapsulation: ViewEncapsulation.None
})
export class DropdownComponent implements AfterViewChecked, OnChanges, OnInit {
ngOnInit(): void {
}
changeLog: string[] = [];
constructor(private renderer: Renderer,
@Inject('type.service') public typeService: any,
@Inject('bi-translate.service') public translateService: any,
@Inject('user-data.service') public userDataService: any,
@Inject('toggle-select.service') public toggleSelectService: any) {
}
protected hasInit: boolean = false;
// Dropdown menu settings
protected _dropdownSettings: DropdownSettings = new DropdownSettings();
public _currentDropdownSettings: DropdownSettings = new DropdownSettings();
// Options available in the dropdown
protected _dropdownOptions: Array<any> = [];
// Options currently selected
protected _selectedOptions: Array<any> = [];
// Prototype: the source option model array
protected _optionModelArr: Array<any>;
public getCalcHeight: number = 0;
@Input('optionModelArr') set optionModelArr(data: Array<any>) {
this._optionModelArr = data;
};
public optionInit: boolean = false;
public selectInit: boolean = false;
public reset: boolean = false;
public resetDropdown: boolean = false;
public resetOption: boolean = false;
public resetSettings: boolean = false;
public selectWarp: any;
public currClass: string = 'se-input-current';
/**
* setParams
* @param param
*/
@Input() set setParams(param: any) {
//this.reset = param;
}
@Input() set calcWindowHeight(height: number) {
if (height) {
this.getCalcHeight = height;
}
}
get optionModelArr() {
return this._optionModelArr;
}
@Output('optionModelArrChange') optionModelArrChange = new EventEmitter<any>();
@ViewChild('toggleInput') toggleInput: ElementRef;
@ViewChild('dropdownInput') dropdownInputComponent: DropdownInputComponent;
@ViewChild('dropdownSelect') dropdownSelectComponent: DropdownSelectComponent;
@ViewChild('toggleSelect') toggleSelect: ElementRef;
@Input('dropdownSettings') set dropdownSettings(data: DropdownSettings) {
if (data) {
this._currentDropdownSettings = data;
}
if (data && !this.resetDropdown) {
for (let key in data) {
if (this.dropdownSettings.hasOwnProperty(key) && data.hasOwnProperty(key)) {
this.dropdownSettings[key] = data[key];
}
}
this.resetDropdown = false;
}
};
get dropdownSettings() {
return this._dropdownSettings;
}
@Input('dropdownOptions') set dropdownOptions(data: Array<any>) {
this.optionInit = true;
this._dropdownOptions = data;
};
get dropdownOptions() {
return this._dropdownOptions;
}
@Input('selectedOptions') set selectedOptions(data: Array<any>) {
if (data.length && this._dropdownOptions.length) {
this._selectedOptions = data;
for (let j in this._selectedOptions) {
let selectedEle = this.typeService.clone(this._selectedOptions[j]);
let flag: boolean = false;
for (let i in this._dropdownOptions) {
let ele = this._dropdownOptions[i];
if (ele.id == selectedEle.id) {
flag = true;
this._dropdownOptions[i].isCurrent = true;
selectedEle = ele;
// // For selected values, fields other than id are allowed to be empty at first and are backfilled from the matching option
// if (ele.hasOwnProperty('group') && ele.group && selectedEle.hasOwnProperty('group') && !selectedEle.group) {
// selectedEle.group = ele.group;
// }
// if (ele.hasOwnProperty('key') && ele.key && selectedEle.hasOwnProperty('key') && !selectedEle.key) {
// selectedEle.key = ele.key;
// }
// if (ele.hasOwnProperty('label') && ele.label && selectedEle.hasOwnProperty('label') && !selectedEle.label) {
// selectedEle.label = ele.label;
// }
// if (ele.hasOwnProperty('imageLabel') && ele.imageLabel && selectedEle.hasOwnProperty('imageLabel') && !selectedEle.imageLabel) {
// selectedEle.imageLabel = ele.imageLabel;
// }
// if (ele.hasOwnProperty('desc') && ele.desc && selectedEle.hasOwnProperty('desc') && !selectedEle.desc) {
// selectedEle.desc = ele.desc;
// }
}
}
if (!flag && (selectedEle.label == '' && selectedEle.key == '')) {
selectedEle.label = this.translateService.manualTranslate(
|
);
}
if (this.reset || this.optionInit) {
this.dropdownInputComponent.selectedOptions = this._selectedOptions;
this.dropdownSelectComponent.selectedOptions = this._selectedOptions;
}
if (this.reset) {
this.reset = false;
}
} else {
this._selectedOptions
= this.dropdownInputComponent.selectedOptions
= this.dropdownSelectComponent.selectedOptions = [];
}
}
get selectedOptions() {
return this._selectedOptions;
}
ngOnChanges(changes: SimpleChanges): void {
let log: string[] = [];
for (let propName in changes) {
let changedProp = changes[propName];
let to = JSON.stringify(changedProp.currentValue);
if (propName === 'selectedOptions') {
this.reset = true;
}
if (propName === 'dropdownOptions') {
this.resetDropdown = true;
this.dropdownSelectComponent.openStatus = false;
}
if (changedProp.isFirstChange()) {
log.push(`Initial value of ${propName} set to ${to}`);
} else {
let from = JSON.stringify(changedProp.previousValue);
log.push(`${propName} changed from ${from} to ${to}`);
if (propName === 'selectedOptions') {
}
if (propName === 'dropdownOptions') {
this.resetOption = true;
this.toggleOptionsChange(changedProp.currentValue);
}
if (propName === 'dropdownSettings') {
this.resetSettings = true;
this.toggleSettingsChange(changedProp.currentValue);
}
}
}
// console.log('dropdown component changeLog', log);
}
ngAfterViewChecked(): void {
if (!this.hasInit && typeof this.toggleInput !== 'undefined') {
this.hasInit = true;
}
}
/**
* The input box triggers display of the dropdown options
* @param event
*/
toggleDropdownEvent(event: any) {
// event.stopPropagation();
// For the input box, only open the dropdown menu on the first click
if (event.target.tagName !== 'INPUT') {
this.toggleDropdownSelectStatus();
} else {
if (!this.dropdownSelectComponent.openStatus) {
this.toggleDropdownSelectStatus();
}
}
}
updateOptionModelArr(data?: any) {
let changedOptions = [];
let changeStatus = '';
if (data) {
changedOptions = data[0]; // the option objects that were changed
changeStatus = data[1]; // change type: 'add' or 'delete'
}
this.optionModelArrChange.emit([this.selectedOptions, changedOptions, changeStatus]);
}
/**
* Push the options selected in the select panel back to the input and the parent module
*/
toggleSelectedOptionsEvent(data?: any) {
this.dropdownInputComponent.selectedOptions = this.selectedOptions = this.dropdownSelectComponent.selectedOptions;
this.updateOptionModelArr(data);
}
/**
* Propagate the option removed from the input back to the select panel and the parent module
*/
removeSelectedOptionsEvent(data?: any) {
this.dropdownSelectComponent.selectedOptions = this.selectedOptions = this.dropdownInputComponent.selectedOptions;
let ele = data[0];
for (let i in this.dropdownSelectComponent.dropdownOptions) {
if (this.dropdownSelectComponent.dropdownOptions.hasOwnProperty(i)
&& this.dropdownSelectComponent.dropdownOptions[i].key == ele.key) {
this.dropdownSelectComponent.dropdownOptions[i].isCurrent = false;
}
}
this.dropdownOptions = this.dropdownSelectComponent.dropdownOptions;
this.updateOptionModelArr(data);
}
/**
* Controls the dropdown menu's visibility and styling
*/
toggleDropdownSelectStatus() {
if (!this.dropdownSelectComponent.openStatus) {
//TODO: control the height dynamically in JS to get a smooth transition effect;
this.dropdownSelectComponent.autoHeight = 'auto';
}
this.dropdownSelectComponent.openStatus = !this.dropdownSelectComponent.openStatus;
this.renderer.setElementClass(this.toggleSelect.nativeElement, 'hide', false);
}
/**
* The input module triggers a search over the select panel's contents
* todo: support remote search
* @param data
*/
triggerSearchAction(data: any) {
if (typeof data !== 'undefined') {
let searchText = data[0];
if (searchText !== '') {
this.toggleDropdownSelectStatus();
this.dropdownSelectComponent.filterDropdown(searchText);
} else {
this.dropdownSelectComponent.resetFilterDropdown();
}
}
}
toggleSettingsChange(settings: any) {
if (
|
'Not Found');
}
this._selectedOptions[j] = this.typeService.clone(selectedEle
|
conditional_block
|
Detector.py
|
.centralwidget)
self.label_3.setGeometry(QtCore.QRect(630, 480, 221, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: rgb(0, 0, 0);")
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(640, 100, 93, 28))
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton.setObjectName("pushButton")
self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.gridLayout.addWidget(self.graphicsView , 10, 30, 601, 541)
self.graphicsView.setGeometry(QtCore.QRect(10, 30, 601, 541))
self.graphicsView.setObjectName("graphicsView")
# self.lb = MyLabel(self)
# self.lb.setGeometry(QRect(10, 30, 601, 541))
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(630, 50, 111, 31))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.pushButton_2.setFont(font)
self.pushButton_2.setMouseTracking(False)
self.pushButton_2.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton_2.setObjectName("pushButton_2")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setEnabled(False)
self.textEdit.setGeometry(QtCore.QRect(720, 390, 104, 31))
self.textEdit.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_2.setEnabled(False)
self.textEdit_2.setGeometry(QtCore.QRect(820, 440, 111, 31))
self.textEdit_2.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_2.setObjectName("textEdit_2")
self.textEdit_3 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_3.setEnabled(False)
self.textEdit_3.setGeometry(QtCore.QRect(840, 490, 104, 31))
self.textEdit_3.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_3.setObjectName("textEdit_3")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(630, 160, 111, 31))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.pushButton_3.setFont(font)
self.pushButton_3.setMouseTracking(True)
self.pushButton_3.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton_3.setObjectName("pushButton_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(640, 210, 141, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setStyleSheet("color: rgb(0, 0, 0);")
self.label_4.setObjectName("label_4")
self.textEdit_4 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_4.setEnabled(False)
self.textEdit_4.setGeometry(QtCore.QRect(640, 260, 281, 31))
self.textEdit_4.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_4.setObjectName("textEdit_4")
self.label_2.raise_()
self.label_3.raise_()
self.pushButton.raise_()
self.graphicsView.raise_()
self.label.raise_()
self.pushButton_2.raise_()
self.textEdit.raise_()
self.textEdit_2.raise_()
self.textEdit_3.raise_()
self.pushButton_3.raise_()
self.label_4.raise_()
self.textEdit_4.raise_()
# self.lb.raise_()
mainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 958, 26))
self.menubar.setObjectName("menubar")
mainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(mainWindow)
self.statusbar.setObjectName("statusbar")
mainWindow.setStatusBar(self.statusbar)
self.retranslateUi(mainWindow)
QtCore.QMetaObject.connectSlotsByName(mainWindow)
self.pushButton.clicked.connect(self.on)
self.pushButton_2.clicked.connect(self.pb_2)
self.pushButton_3.clicked.connect(QCoreApplication.instance().quit)
def retranslateUi(self, mainWindow):
_translate = QtCore.QCoreApplication.translate
mainWindow.setWindowTitle(_translate("mainWindow", "舌像裂纹分析"))
self.label.setText(_translate("mainWindow", "裂纹条数:"))
self.label_2.setText(_translate("mainWindow", "裂纹所占像素点个数:"))
self.label_3.setText(_translate("mainWindow", "裂纹占整个舌头的比例:"))
self.pushButton.setText(_translate("mainWindow", "Next"))
self.pushButton_2.setText(_translate("mainWindow", "选择文件夹"))
self.pushButton_3.setText(_translate("mainWindow", "保存并退出"))
self.label_4.setText(_translate("mainWindow", "当前图片名称:"))
def shows(self, img2):
# img2 = img.copy()
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
height = img2.shape[0]
width = img2.shape[1]
ratio = float(height / width)
new_height = 538
new_width = 598
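# bytes per line for an RGB888 QImage: 3 channels per pixel times the resized width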
withStep = new_width * 3
img = cv2.resize(img2, (new_width, new_height))
frame = QImage(img, new_width, new_height,withStep, QImage.Format_RGB888)
pix = QPixmap.fromImage(frame)
self.item = QGraphicsPixmapItem(pix)
self.scene = QGraphicsScene() # 创建场景
self.scene.addItem(self.item)
self.graphicsView.setScene(self.scene)
# cut_img = img[x0:abs(x1 - x0), y0:abs(y1 - y0)]
# # detection(cut_img)
# cv2.setMouseCallback('graphicsView',on_mouse())
def on(self):
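# "Next" button handler: advance to the next image in the chosen folder and run the detection routine in a worker thread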
self.flag = True
print("线程开启")
self.i=self.i+1
self.file = self.imgfiles[self.i]
t = threading.Thread(target=self.pb_1, name='t')  # pass the method itself; calling it here would run detection on the UI thread
t.start()
def pb_2(self):
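# "Choose folder" button handler: pick the image directory and prepare the Data.xls workbook that collects the results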
self.i=-1
self.openfile_name = QFileDialog.getExistingDirectory(self, '选择文件', '')
self.imgfiles = os.listdir(self.openfile_name)
global c, r, s1,img,x0,x1,y0,y1
self.wb = xlwt.Workbook()
self.ws1 = self.wb.add_sheet('A Test Sheet', cell_overwrite_ok=True)
self.row = 1
self.col = 1
self.ws1.write(0, 0, "图片编号 ")
self.wb.save('Data.xls')
self.ws1.write(0, 1, "裂纹条数 ")
self.wb.save('Data.xls')
self.ws1.write(0, 2, "裂纹像素点总和 ")
self.wb.save('Data.xls')
self.
|
ws1.write(0, 3, "裂纹在整个舌头中的占比 ")
self.wb.save('Data.xls')
def pb_1(self):
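# Worker routine: for the current file, reset the counters, load and display the image, and open the OpenCV 'image' window used for region selection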
global c, r, s1, img, x0, x1, y0, y1
while self.flag:
c = 0
r = 0
s1 = 0
# self.textEdit_4.setText(self.file)
# self.textEdit_3.setText("{}".format(s1))
# self.textEdit_2.setText("{}".format(c))
# self.textEdit.setText("{}".format(r))
cv2.waitKey(0)
img = cv2.imread(self.openfile_name + "/" + self.file)
print(self.openfile_name + "/" + self.file)
mainMindow.shows(img)
cv2.namedWindow('image', 0)
|
identifier_body
|
|
Detector.py
|
:, 1]
# Image preprocessing
def bi_demo(image, d, m, n): # bilateral filtering
dst = cv2.bilateralFilter(image, d, m, n)
return dst
kernel = np.ones((6, 6), dtype=np.uint8)
# the third positional argument of cv2.erode/dilate/morphologyEx is dst, so pass the iteration count by keyword
erosion = cv2.erode(img2, kernel, iterations=16)
img2 = cv2.dilate(img2, kernel, iterations=25)
img2 = cv2.morphologyEx(img2, cv2.MORPH_CLOSE, kernel, iterations=19)
img3 = bi_demo(img2, 9, 10, 19)
def detect(image): ### crack detection
start = 0
l = image.shape[0] // 20
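# Split the image into horizontal strips of 20 rows; inside each strip, pixels near the strip's maximum brightness (and below 180) are marked as crack (255), everything else as background (0)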
for i in range(0, l + 1, 1):
end = start + 20
if i == l:
imag = image[start:image.shape[0], :]
else:
imag = image[start:end, :]
max1 = max(max(row) for row in imag)
for y in range(0, imag.shape[0], 1):
for x in range(0, imag.shape[1], 1):
if (max1 * 8 // 9 < imag[y, x] < 180):
imag[y, x] = 255
else:
imag[y, x] = 0
if i == l:
image[start:image.shape[0], :] = imag[:, :]
else:
image[start:end, :] = imag[:, :]
start = end
detect(img3)
# Area calculation
def area(image, image0): ### count the pixels covered by cracks
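# image: binarized crack mask (255 = crack pixel); image0: image whose zero-valued pixels are background. Returns the crack pixel count and its ratio to the non-background (tongue) area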
count = 0
all = 0
for y in range(0, image0.shape[0], 1):
for x in range(0, image0.shape[1], 1):
if (image0[y, x] == 0):
all = all + 1
img4 = img[:, :, 0]
for y in range(0, image.shape[0], 1):
for x in range(0, image.shape[1], 1):
if (image[y, x] == 255):
count = count + 1
getcontext().prec = 4
s = Decimal(count) / Decimal((img4.shape[0] * img4.shape[1] - all))
return count, s
count, s = area(img3, img2)
print("舌像裂纹面积为:{} 像素点, 占整个舌头像素的:{}".format(count, s))
result = lwdt(img3)
r = result
c = count
s1 = s
mainMindow.shows(img3)
mainMindow.textEdit_4.setText(mainMindow.file)
mainMindow.textEdit_3.setText("{}".format(s1))
mainMindow.textEdit_2.setText("{}".format(c))
mainMindow.textEdit.setText("{}".format(r))
# file_name = file.split('.')[0]
# os.makedirs("./new/{}".format(file_name))
# cv2.imwrite("./new/{}/{}".format(file_name, file), img00)
# cv2.imwrite("./new/{}/{}".format(file_name, "0.jpg"), img0)
# cv2.imwrite("./new/{}/{}".format(file_name, "1.jpg"), img1)
# cv2.imwrite("./new/{}/{}".format(file_name, "2.jpg"), img2)
# cv2.imwrite("./new/{}/{}".format(file_name, "3.jpg"), img3)
def on_mouse(event, x, y, flags, param):
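# OpenCV mouse callback: drag a rectangle with the left button; on release the selected region is cropped from the image and passed to detection()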
global point1, point2, img
img2 = img.copy()
if event == cv2.EVENT_LBUTTONDOWN: # left button pressed
point1 = (x, y)
# dragging with the left button held
cv2.rectangle(img2, point1, (x, y), (0, 255, 0), 1)
cv2.namedWindow("image", 0)
cv2.imshow('image', img2)
elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG
|
): # dragging with the left button held
cv2.rectangle(img2, point1, (x, y), (0, 255, 0), 1)
cv2.imshow('image', img2)
elif event == cv2.EVENT_LBUTTONUP: # left button released
point2 = (x, y)
cv2.rectangle(img2, point1, point2, (0, 0, 255), 1)
cv2.imshow('image', img2)
min_x = min(point1[0], point2[0])
min_y = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
cut_img = img[min_y:min_y + height, min_x:min_x + width]
detection(cut_img)
class Ui_mainWindow(QMainWindow):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.setEnabled(True)
mainWindow.resize(958, 627)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(630, 380, 91, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label.setFont(font)
self.label.setStyleSheet("color: rgb(0, 0, 0);")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(630, 430, 201, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color: rgb(0, 0, 0);")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(630, 480, 221, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: rgb(0, 0, 0);")
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(640, 100, 93, 28))
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton.setObjectName("pushButton")
self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.gridLayout.addWidget(self.graphicsView , 10, 30, 601, 541)
self.graphicsView.setGeometry(QtCore.QRect(10, 30, 601, 541))
self.graphicsView.setObjectName("graphicsView")
# self.lb = MyLabel(self)
# self.lb.setGeometry(QRect(10, 30, 601, 541))
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(630, 50, 111, 31))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.pushButton_2.setFont(font)
self.pushButton_2.setMouseTracking(False)
self.pushButton_2.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton_2.setObjectName("pushButton_2")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setEnabled(False)
self.textEdit.setGeometry(QtCore.QRect(720, 390, 104, 31))
self.textEdit.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_2.setEnabled(False)
self.textEdit_2.setGeometry(QtCore.QRect(820, 440, 111, 31))
self.textEdit_2.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_2.setObjectName("textEdit_2")
self.textEdit_3 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_3.setEnabled(False)
self.textEdit_3.setGeometry(QtCore.QRect(84
|
_LBUTTON
|
identifier_name
|
Detector.py
|
getcontext().prec = 4
s = Decimal(count) / Decimal((img4.shape[0] * img4.shape[1] - all))
return count, s
count, s = area(img3, img2)
print("舌像裂纹面积为:{} 像素点, 占整个舌头像素的:{}".format(count, s))
result = lwdt(img3)
r = result
c = count
s1 = s
mainMindow.shows(img3)
mainMindow.textEdit_4.setText(mainMindow.file)
mainMindow.textEdit_3.setText("{}".format(s1))
mainMindow.textEdit_2.setText("{}".format(c))
mainMindow.textEdit.setText("{}".format(r))
# file_name = file.split('.')[0]
# os.makedirs("./new/{}".format(file_name))
# cv2.imwrite("./new/{}/{}".format(file_name, file), img00)
# cv2.imwrite("./new/{}/{}".format(file_name, "0.jpg"), img0)
# cv2.imwrite("./new/{}/{}".format(file_name, "1.jpg"), img1)
# cv2.imwrite("./new/{}/{}".format(file_name, "2.jpg"), img2)
# cv2.imwrite("./new/{}/{}".format(file_name, "3.jpg"), img3)
def on_mouse(event, x, y, flags, param):
global point1, point2, img
img2 = img.copy()
if event == cv2.EVENT_LBUTTONDOWN: # left button pressed
point1 = (x, y)
# dragging with the left button held
cv2.rectangle(img2, point1, (x, y), (0, 255, 0), 1)
cv2.namedWindow("image", 0)
cv2.imshow('image', img2)
elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON): # dragging with the left button held
cv2.rectangle(img2, point1, (x, y), (0, 255, 0), 1)
cv2.imshow('image', img2)
elif event == cv2.EVENT_LBUTTONUP: # left button released
point2 = (x, y)
cv2.rectangle(img2, point1, point2, (0, 0, 255), 1)
cv2.imshow('image', img2)
min_x = min(point1[0], point2[0])
min_y = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
cut_img = img[min_y:min_y + height, min_x:min_x + width]
detection(cut_img)
class Ui_mainWindow(QMainWindow):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.setEnabled(True)
mainWindow.resize(958, 627)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(630, 380, 91, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label.setFont(font)
self.label.setStyleSheet("color: rgb(0, 0, 0);")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(630, 430, 201, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color: rgb(0, 0, 0);")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(630, 480, 221, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: rgb(0, 0, 0);")
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(640, 100, 93, 28))
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton.setObjectName("pushButton")
self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.gridLayout.addWidget(self.graphicsView , 10, 30, 601, 541)
self.graphicsView.setGeometry(QtCore.QRect(10, 30, 601, 541))
self.graphicsView.setObjectName("graphicsView")
# self.lb = MyLabel(self)
# self.lb.setGeometry(QRect(10, 30, 601, 541))
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(630, 50, 111, 31))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.pushButton_2.setFont(font)
self.pushButton_2.setMouseTracking(False)
self.pushButton_2.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton_2.setObjectName("pushButton_2")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setEnabled(False)
self.textEdit.setGeometry(QtCore.QRect(720, 390, 104, 31))
self.textEdit.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_2.setEnabled(False)
self.textEdit_2.setGeometry(QtCore.QRect(820, 440, 111, 31))
self.textEdit_2.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_2.setObjectName("textEdit_2")
self.textEdit_3 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_3.setEnabled(False)
self.textEdit_3.setGeometry(QtCore.QRect(840, 490, 104, 31))
self.textEdit_3.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_3.setObjectName("textEdit_3")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(630, 160, 111, 31))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.pushButton_3.setFont(font)
self.pushButton_3.setMouseTracking(True)
self.pushButton_3.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton_3.setObjectName("pushButton_3")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(640, 210, 141, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setStyleSheet("color: rgb(0, 0, 0);")
self.label_4.setObjectName("label_4")
self.textEdit_4 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_4.setEnabled(False)
self.textEdit_4.setGeometry(QtCore.QRect(640, 260, 281, 31))
self.textEdit_4.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_4.setObjectName("textEdit_4")
self.label_2.raise_()
self.label_3.raise_()
self.pushButton.raise_()
self.graphicsView.raise_()
self.label.raise_()
self.pushButton_2.raise_()
self.textEdit.raise_()
self.textEdit_2.raise_()
self.textEdit_3.raise_()
self.pushButton_3.raise_()
self.label_4.raise_()
self.textEdit_4.raise_()
|
# self.lb.raise_()
mainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(mainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 958, 26))
self.menubar.setObjectName("menubar")
|
random_line_split
|
|
Detector.py
|
1]
# Image preprocessing
def bi_demo(image, d, m, n): # bilateral filtering
dst = cv2.bilateralFilter(image, d, m, n)
return dst
kernel = np.ones((6, 6), dtype=np.uint8)
# the third positional argument of cv2.erode/dilate/morphologyEx is dst, so pass the iteration count by keyword
erosion = cv2.erode(img2, kernel, iterations=16)
img2 = cv2.dilate(img2, kernel, iterations=25)
img2 = cv2.morphologyEx(img2, cv2.MORPH_CLOSE, kernel, iterations=19)
img3 = bi_demo(img2, 9, 10, 19)
def detect(image): ### crack detection
start = 0
l = image.shape[0] // 20
for i in range(0, l + 1, 1):
end = start + 20
if i == l:
imag = image[start:image.shape[0], :]
else:
imag = image[start:end, :]
max1 = max(max(row) for row in imag)
for y in range(0, imag.shape[0], 1):
for x in range(0, imag.shape[1], 1):
if (max1 * 8 // 9 < imag[y, x] < 180):
imag[y, x] = 255
else:
imag[y, x] = 0
if i == l:
image[start:image.shape[0], :] = imag[:, :]
else:
image[start:end, :] = imag[:, :]
start = end
detect(img3)
# Area calculation
def area(image, image0): ### count the pixels covered by cracks
count = 0
all = 0
for y in range(0, image0.shape[0], 1):
|
):
if (image0[y, x] == 0):
all = all + 1
img4 = img[:, :, 0]
for y in range(0, image.shape[0], 1):
for x in range(0, image.shape[1], 1):
if (image[y, x] == 255):
count = count + 1
getcontext().prec = 4
s = Decimal(count) / Decimal((img4.shape[0] * img4.shape[1] - all))
return count, s
count, s = area(img3, img2)
print("舌像裂纹面积为:{} 像素点, 占整个舌头像素的:{}".format(count, s))
result = lwdt(img3)
r = result
c = count
s1 = s
mainMindow.shows(img3)
mainMindow.textEdit_4.setText(mainMindow.file)
mainMindow.textEdit_3.setText("{}".format(s1))
mainMindow.textEdit_2.setText("{}".format(c))
mainMindow.textEdit.setText("{}".format(r))
# file_name = file.split('.')[0]
# os.makedirs("./new/{}".format(file_name))
# cv2.imwrite("./new/{}/{}".format(file_name, file), img00)
# cv2.imwrite("./new/{}/{}".format(file_name, "0.jpg"), img0)
# cv2.imwrite("./new/{}/{}".format(file_name, "1.jpg"), img1)
# cv2.imwrite("./new/{}/{}".format(file_name, "2.jpg"), img2)
# cv2.imwrite("./new/{}/{}".format(file_name, "3.jpg"), img3)
def on_mouse(event, x, y, flags, param):
global point1, point2, img
img2 = img.copy()
if event == cv2.EVENT_LBUTTONDOWN: # left button pressed
point1 = (x, y)
# dragging with the left button held
cv2.rectangle(img2, point1, (x, y), (0, 255, 0), 1)
cv2.namedWindow("image", 0)
cv2.imshow('image', img2)
elif event == cv2.EVENT_MOUSEMOVE and (flags & cv2.EVENT_FLAG_LBUTTON): # dragging with the left button held
cv2.rectangle(img2, point1, (x, y), (0, 255, 0), 1)
cv2.imshow('image', img2)
elif event == cv2.EVENT_LBUTTONUP: # left button released
point2 = (x, y)
cv2.rectangle(img2, point1, point2, (0, 0, 255), 1)
cv2.imshow('image', img2)
min_x = min(point1[0], point2[0])
min_y = min(point1[1], point2[1])
width = abs(point1[0] - point2[0])
height = abs(point1[1] - point2[1])
cut_img = img[min_y:min_y + height, min_x:min_x + width]
detection(cut_img)
class Ui_mainWindow(QMainWindow):
def setupUi(self, mainWindow):
mainWindow.setObjectName("mainWindow")
mainWindow.setEnabled(True)
mainWindow.resize(958, 627)
self.centralwidget = QtWidgets.QWidget(mainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(630, 380, 91, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label.setFont(font)
self.label.setStyleSheet("color: rgb(0, 0, 0);")
self.label.setObjectName("label")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(630, 430, 201, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setStyleSheet("color: rgb(0, 0, 0);")
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(630, 480, 221, 51))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.label_3.setFont(font)
self.label_3.setStyleSheet("color: rgb(0, 0, 0);")
self.label_3.setObjectName("label_3")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(640, 100, 93, 28))
font = QtGui.QFont()
font.setFamily("Times New Roman")
font.setPointSize(12)
self.pushButton.setFont(font)
self.pushButton.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton.setObjectName("pushButton")
self.graphicsView = QtWidgets.QGraphicsView(self.centralwidget)
self.gridLayout = QtWidgets.QGridLayout()
self.gridLayout.setObjectName("gridLayout")
self.gridLayout.addWidget(self.graphicsView , 10, 30, 601, 541)
self.graphicsView.setGeometry(QtCore.QRect(10, 30, 601, 541))
self.graphicsView.setObjectName("graphicsView")
# self.lb = MyLabel(self)
# self.lb.setGeometry(QRect(10, 30, 601, 541))
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(630, 50, 111, 31))
font = QtGui.QFont()
font.setFamily("微软雅黑")
font.setPointSize(12)
self.pushButton_2.setFont(font)
self.pushButton_2.setMouseTracking(False)
self.pushButton_2.setStyleSheet("color: rgb(0, 0, 0);")
self.pushButton_2.setObjectName("pushButton_2")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setEnabled(False)
self.textEdit.setGeometry(QtCore.QRect(720, 390, 104, 31))
self.textEdit.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit.setObjectName("textEdit")
self.textEdit_2 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_2.setEnabled(False)
self.textEdit_2.setGeometry(QtCore.QRect(820, 440, 111, 31))
self.textEdit_2.setStyleSheet("\n"
"background-color: rgb(255, 255, 255);")
self.textEdit_2.setObjectName("textEdit_2")
self.textEdit_3 = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit_3.setEnabled(False)
self.textEdit_3.setGeometry(QtCore.QRect(84
|
for x in range(0, image0.shape[1], 1
|
conditional_block
|
admin_panel.py
|
tgid"] in UsersDB.allAdminsId():
await message.answer(f'Пользователь {user["username"]} уже является админом!')
return
# Successful adding admin
UsersDB.update(user["tgid"], "role", Role.Admin.value)
await message.answer(f'@{user["username"]} Назначен Администратором!')
await bot.send_message(chat_id=user["tgid"],
text=f'Вы назначены Администратором!'
f'Ваш бог: @{message.from_user.username}')
@dp.message_handler(IsAdmin(), commands=[CMDS["CHANGE_FEE"]])
async def cmdChangeFee(message: Message, command):
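# /change_fee <contra_nick> <new_fee>: set a custom fee for a contractor; passing -1 falls back to the global fee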
if command and command.args:
nick = command.args.split()[0]
user = UsersDB.getUserByContraNick(nick)
# If username is not valid
if not user:
await message.answer(f'Пользователь {nick} не найден!')
return
# If wrong num of args
if len(command.args.split()) < 2:
await message.answer('Не корректный запрос')
return
fee = command.args.split()[1]
if fee == '-1': # command args are strings, so compare with the string '-1' (reset to the global fee)
fee = os.environ["GLOBAL_FEE"]
# If invalid fee
if not isValidFloat(fee):
await message.answer('Не корректный размер комиссии')
return
# Successful changed fee
UsersDB.update(user["tgid"], "custom_fee", fee)
await message.answer(f'У @{user["username"]} теперь комиссия составляет {fee}%!')
await bot.send_message(chat_id=user["tgid"],
text=f'Вам изменили комиссию до {fee}%!'
f'Ваш бог: @{message.from_user.username}')
# ==================== MESSAGES ================== #
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG"]))
async def txtBroadcast(message: Message):
await message.answer(text="⭐Выбери опцию кому отправить сообщение",
reply_markup=nav.broadcast_menu)
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_C"]))
async def txtBroadcastContra(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки исполнителям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="CONTRAS")
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_ALL"]))
async def txtBroadcastAll(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки всем пользователям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="ALL")
@dp.message_handler(IsAdmin(), Text(NAV["QA_CONFIRM"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
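# Broadcast confirmed: resolve the audience chosen earlier (contractors or all users) and send the stored message to each of them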
opt = (await state.get_data())["broadcast_opt"]
broadcast_msg = (await state.get_data())["broadcast_msg"]
users = UsersDB.allContrasId() if opt == "CONTRAS" else \
UsersDB.allUsersId() if opt == "ALL" else []
# Send broadcasted
for user in users:
await bot.send_message(chat_id=user, text=MSG["BROADCAST"].format(broadcast_msg))
# Answer to god
await message.answer(text="Сообщение разослано!",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
@dp.message_handler(IsAdmin(), Text(NAV["QA_CHANGE"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message):
await message.answer(text="Введите сообщение заново:",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
@dp.message_handler(IsAdmin(), Text(NAV["QA_CANCEL"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
await message.answer(text="🚫 Рассылка отме
|
tent_types=[ContentType.ANY])
async def stateBroadcastAll(message: Message, state: FSMContext):
q = str(message.text).replace('<', '').replace('>', '')
await state.update_data(broadcast_msg=q)
await message.answer(text="⭐Вот превью рассылки\n"
"———————————————\n"
f"{q}\n"
"———————————————\n",
reply_markup=nav.confirm_menu)
@dp.message_handler(IsAdmin(), Text(NAV["BACK"]))
async def txtAdminPanel(message: Message):
await message.answer(text="⭐🌟⭐🌟⭐🌟⭐🌟", reply_markup=nav.startMenu(message.from_user.id))
@dp.message_handler(IsAdmin(), Text(NAV["ADMIN_P"]))
async def txtAdminPanel(message: Message):
await message.answer(text="⭐🌟⭐🌟 ADMIN PANEL⭐🌟⭐🌟\n"
"Изменить комиссию исполнителю - /change_fee 'nick' 'new fee'\n"
"Назначить админом исполнителя /add_admin 'nick'",
reply_markup=nav.admin_panel)
@dp.message_handler(IsAdmin(), Text(NAV["AP_WITHDRAW_REQUESTS"]))
async def txtAdminWDRequests(message: Message):
requests = [x for x in WithdrawsDB.all_requests() if x["status"] == "WAITING"]
if len(requests) == 0:
await message.answer(text=MSG["NO_WD_REQUESTS"])
return
for reqest in requests:
u_data = UsersDB.getUser(reqest["sender_id"])
await message.answer(text=MSG["WAITING_WD_REQUEST"].format(
'@' + u_data["username"],
reqest["amount"],
u_data["withdraw_data"]["card"]
), reply_markup=nav.judge_wd(reqest["sender_id"], reqest["trans_id"]))
@dp.message_handler(IsAdmin(), Text(NAV["AP_REG_REQUESTS"]))
async def txtAdminRegRequests(message: Message):
requests = [x for x in UsersDB.allUsers() if ":WAIT_REGISTER:" in x["statuses"]]
for reqest in requests:
txt = MSG["NEW_CONTRA"].format(reqest["contra_nick"], reqest["description"], getLinks(reqest["soc_net_links"]))
reply = nav.judge_contra(reqest["tgid"])
if "photo" in reqest and reqest["photo"] != "NO_PHOTO":
await message.answer_photo(photo=reqest["photo"], caption=txt, reply_markup=reply)
else:
await message.answer(text=txt, reply_markup=reply)
if len(requests) == 0:
await message.answer(text=MSG["NO_REG_REQUESTS"])
@dp.message_handler(IsAdmin(), Text(NAV["CHANGE_GLOBAL_FEE"]))
async def txtAdminFee(message: Message):
await message.answer(text=f'Текущая комиссия: {os.environ["GLOBAL_FEE"]}%\n'
f'Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
@dp.message_handler(IsAdmin(), state=AdminPanel.Fee)
async def txtAdminFee(message: Message, state: FSMContext):
if message.text.replace('.', '', 1).isdigit() and 0 < float(message.text) < 100:
await message.answer(text=f'Установлен новый размер комиссии: {float(message.text)}%')
os.putenv("GLOBAL_FEE", message.text)
os.environ["GLOBAL_FEE"] = message.text
await state.finish()
else:
await message.answer(text=f'Некорректный ввод!')
await message.answer(text='Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
# ==================== CALLBACKS ================== #
@dp.callback_query_handler(lambda x: "JUDGE_CONTRA" in x.data)
async def judgeContra(cb: CallbackQuery):
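# Callback data format: "JUDGE_CONTRA:<ACCEPT|REJECT>:<user_id>"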
_, action, user_id = cb.data.split(':')
user_username = UsersDB.get(user_id, "username")
txt = ""
if action == "ACCEPT":
UsersDB.update(user_id, "statuses", ":CONTRA:")
if UsersDB.get(user_id, "role") != Role.Admin.value:
UsersDB.update(user_id, "role", Role.User.value)
txt = f"✅ Заявка пользователя <b>{user_username}</b> одобрена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_CONFIRMED"],
reply_markup=nav.startMenu(user_id))
elif action == "REJECT":
txt = f"🚫 Заявка пользователя <b>{user_username}</b> отклонена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_REJECTED"],
reply_markup=nav.startMenu(user_id))
try:
await cb.message.edit_caption(caption=txt)
except Exception: # the message may be plain text without a caption; fall back to editing the text
await cb.message.edit_text(text=txt)
await cb.answer()
@dp.callback_query_handler(lambda x: "JUDGE_WD:" in x.data)
async def judgeWithdraw(cb: CallbackQuery):
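# Callback data format: "JUDGE_WD:<action>:<user_id>:<trans_id>"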
_, action, user_id, trans = cb.data.split(':')
await cb.answer()
if action in ["WITHDRAW",
|
нена",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
@dp.message_handler(IsAdmin(), state=AdminPanel.Broadcast, con
|
identifier_body
|
admin_panel.py
|
(chat_id=user["tgid"],
text=f'Вы назначены Администратором!'
f'Ваш бог: @{message.from_user.username}')
@dp.message_handler(IsAdmin(), commands=[CMDS["CHANGE_FEE"]])
async def cmdChangeFee(message: Message, command):
if command and command.args:
nick = command.args.split()[0]
user = UsersDB.getUserByContraNick(nick)
# If username is not valid
if not user:
await message.answer(f'Пользователь {nick} не найден!')
return
# If wrong num of args
if len(command.args.split()) < 2:
await message.answer('Не корректный запрос')
return
fee = command.args.split()[1]
if fee == '-1': # command args are strings, so compare with the string '-1' (reset to the global fee)
fee = os.environ["GLOBAL_FEE"]
# If invalid fee
if not isValidFloat(fee):
await message.answer('Не корректный размер комиссии')
return
# Successful changed fee
UsersDB.update(user["tgid"], "custom_fee", fee)
await message.answer(f'У @{user["username"]} теперь комиссия составляет {fee}%!')
await bot.send_message(chat_id=user["tgid"],
text=f'Вам изменили комиссию до {fee}%!'
f'Ваш бог: @{message.from_user.username}')
# ==================== MESSAGES ================== #
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG"]))
async def txtBroadcast(message: Message):
await message.answer(text="⭐Выбери опцию кому отправить сообщение",
reply_markup=nav.broadcast_menu)
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_C"]))
async def txtBroadcastContra(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки исполнителям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="CONTRAS")
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_ALL"]))
async def txtBroadcastAll(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки всем пользователям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="ALL")
@dp.message_handler(IsAdmin(), Text(NAV["QA_CONFIRM"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
opt = (await state.get_data())["broadcast_opt"]
broadcast_msg = (await state.get_data())["broadcast_msg"]
users = UsersDB.allContrasId() if opt == "CONTRAS" else \
UsersDB.allUsersId() if opt == "ALL" else []
# Send broadcasted
for user in users:
await bot.send_message(chat_id=user, text=MSG["BROADCAST"].format(broadcast_msg))
# Answer to god
await message.answer(text="Сообщение разослано!",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
@dp.message_handler(IsAdmin(), Text(NAV["QA_CHANGE"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message):
await message.answer(text="Введите сообщение заново:",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
@dp.message_handler(IsAdmin(), Text(NAV["QA_CANCEL"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
await message.answer(text="🚫 Рассылка отменена",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
@dp.message_handler(IsAdmin(), state=AdminPanel.Broadcast, content_types=[ContentType.ANY])
async def stateBroadcastAll(message: Message, state: FSMContext):
q = str(message.text).replace('<', '').replace('>', '')
await state.update_data(broadcast_msg=q)
await message.answer(text="⭐Вот превью рассылки\n"
"———————————————\n"
f"{q}\n"
"———————————————\n",
reply_markup=nav.confirm_menu)
@dp.message_handler(IsAdmin(), Text(NAV["BACK"]))
async def txtAdminPanel(message: Message):
await message.answer(text="⭐🌟⭐🌟⭐🌟⭐🌟", reply_markup=nav.startMenu(message.from_user.id))
@dp.message_handler(IsAdmin(), Text(NAV["ADMIN_P"]))
async def txtAdminPanel(message: Message):
await message.answer(text="⭐🌟⭐🌟 ADMIN PANEL⭐🌟⭐🌟\n"
"Изменить комиссию исполнителю - /change_fee 'nick' 'new fee'\n"
"Назначить админом исполнителя /add_admin 'nick'",
reply_markup=nav.admin_panel)
@dp.message_handler(IsAdmin(), Text(NAV["AP_WITHDRAW_REQUESTS"]))
async def txtAdminWDRequests(message: Message):
requests = [x for x in WithdrawsDB.all_requests() if x["status"] == "WAITING"]
if len(requests) == 0:
await message.answer(text=MSG["NO_WD_REQUESTS"])
return
for reqest in requests:
u_data = UsersDB.getUser(reqest["sender_id"])
await message.answer(text=MSG["WAITING_WD_REQUEST"].format(
'@' + u_data["username"],
reqest["amount"],
u_data["withdraw_data"]["card"]
), reply_markup=nav.judge_wd(reqest["sender_id"], reqest["trans_id"]))
@dp.message_handler(IsAdmin(), Text(NAV["AP_REG_REQUESTS"]))
async def txtAdminRegRequests(message: Message):
requests = [x for x in UsersDB.allUsers() if ":WAIT_REGISTER:" in x["statuses"]]
for reqest in requests:
txt = MSG["NEW_CONTRA"].format(reqest["contra_nick"], reqest["description"], getLinks(reqest["soc_net_links"]))
reply = nav.judge_contra(reqest["tgid"])
if "photo" in reqest and reqest["photo"] != "NO_PHOTO":
await message.answer_photo(photo=reqest["photo"], caption=txt, reply_markup=reply)
else:
await message.answer(text=txt, reply_markup=reply)
if len(requests) == 0:
await message.answer(text=MSG["NO_REG_REQUESTS"])
@dp.message_handler(IsAdmin(), Text(NAV["CHANGE_GLOBAL_FEE"]))
async def txtAdminFee(message: Message):
await message.answer(text=f'Текущая комиссия: {os.environ["GLOBAL_FEE"]}%\n'
f'Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
@dp.message_handler(IsAdmin(), state=AdminPanel.Fee)
async def txtAdminFee(message: Message, state: FSMContext):
if message.text.replace('.', '', 1).isdigit() and 0 < float(message.text) < 100:
await message.answer(text=f'Установлен новый размер комиссии: {float(message.text)}%')
os.putenv("GLOBAL_FEE", message.text)
os.environ["GLOBAL_FEE"] = message.text
await state.finish()
else:
await message.answer(text=f'Некорректный ввод!')
await message.answer(text='Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
# ==================== CALLBACKS ================== #
@dp.callback_query_handler(lambda x: "JUDGE_CONTRA" in x.data)
async def judgeContra(cb: CallbackQuery):
_, action, user_id = cb.data.split(':')
user_username = UsersDB.get(user_id, "username")
txt = ""
if action == "ACCEPT":
UsersDB.update(user_id, "statuses", ":CONTRA:")
if UsersDB.get(user_id, "role") != Role.Admin.value:
UsersDB.update(user_id, "role", Role.User.value)
txt = f"✅ Заявка пользователя <b>{user_username}</b> одобрена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_CONFIRMED"],
reply_markup=nav.startMenu(user_id))
elif action == "REJECT":
txt = f"🚫 Заявка пользователя <b>{user_username}</b> отклонена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_REJECTED"],
reply_markup=nav.startMenu(user_id))
try:
await cb.message.edit_caption(caption=txt)
except Exception: # the message may be plain text without a caption; fall back to editing the text
await cb.message.edit_text(text=txt)
await cb.answer()
@dp.callback_query_handler(lambda x: "JUDGE_WD:" in x.data)
async def judgeWithdraw(cb: CallbackQuery):
_, action, user_id, trans = cb.data.split(':')
await cb.answer()
if action in ["WITHDRAW", "QIWIPAY"]:
wd = WithdrawsDB.get(int(trans))
if action == "QIWIPAY":
s = qiwiAutoPay(UsersDB.get(user_id, "wi
|
thdraw_data")["card"], float(wd["amount"]))
try:
await cb.message.answer(text=MSG["AUTOPAY_INFO"].format(s["fields"]["account"],
s["sum"]["
|
conditional_block
|
|
admin_panel.py
|
@dp.message_handler(IsAdmin(), commands=[CMDS["CHANGE_FEE"]])
async def cmdChangeFee(message: Message, command):
if command and command.args:
nick = command.args.split()[0]
user = UsersDB.getUserByContraNick(nick)
# If username is not valid
if not user:
await message.answer(f'Пользователь {nick} не найден!')
return
# If wrong num of args
if len(command.args.split()) < 2:
await message.answer('Не корректный запрос')
return
fee = command.args.split()[1]
if fee == '-1': # command args are strings, so compare with the string '-1' (reset to the global fee)
fee = os.environ["GLOBAL_FEE"]
# If invalid fee
if not isValidFloat(fee):
await message.answer('Не корректный размер комиссии')
return
# Successful changed fee
UsersDB.update(user["tgid"], "custom_fee", fee)
await message.answer(f'У @{user["username"]} теперь комиссия составляет {fee}%!')
await bot.send_message(chat_id=user["tgid"],
text=f'Вам изменили комиссию до {fee}%!'
f'Ваш бог: @{message.from_user.username}')
# ==================== MESSAGES ================== #
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG"]))
async def txtBroadcast(message: Message):
await message.answer(text="⭐Выбери опцию кому отправить сообщение",
reply_markup=nav.broadcast_menu)
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_C"]))
async def txtBroadcastContra(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки исполнителям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="CONTRAS")
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_ALL"]))
async def txtBroadcastAll(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки всем пользователям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="ALL")
@dp.message_handler(IsAdmin(), Text(NAV["QA_CONFIRM"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
opt = (await state.get_data())["broadcast_opt"]
broadcast_msg = (await state.get_data())["broadcast_msg"]
users = UsersDB.allContrasId() if opt == "CONTRAS" else \
UsersDB.allUsersId() if opt == "ALL" else []
# Send broadcasted
for user in users:
await bot.send_message(chat_id=user, text=MSG["BROADCAST"].format(broadcast_msg))
# Answer to god
await message.answer(text="Сообщение разослано!",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
@dp.message_handler(IsAdmin(), Text(NAV["QA_CHANGE"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message):
await message.answer(text="Введите сообщение заново:",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
@dp.message_handler(IsAdmin(), Text(NAV["QA_CANCEL"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
await message.answer(text="🚫 Рассылка отменена",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
@dp.message_handler(IsAdmin(), state=AdminPanel.Broadcast, content_types=[ContentType.ANY])
async def stateBroadcastAll(message: Message, state: FSMContext):
q = str(message.text).replace('<', '').replace('>', '')
await state.update_data(broadcast_msg=q)
await message.answer(text="⭐Вот превью рассылки\n"
"———————————————\n"
f"{q}\n"
"———————————————\n",
reply_markup=nav.confirm_menu)
@dp.message_handler(IsAdmin(), Text(NAV["BACK"]))
async def txtAdminPanel(message: Message):
await message.answer(text="⭐🌟⭐🌟⭐🌟⭐🌟", reply_markup=nav.startMenu(message.from_user.id))
@dp.message_handler(IsAdmin(), Text(NAV["ADMIN_P"]))
async def txtAdminPanel(message: Message):
await message.answer(text="⭐🌟⭐🌟 ADMIN PANEL⭐🌟⭐🌟\n"
"Изменить комиссию исполнителю - /change_fee 'nick' 'new fee'\n"
"Назначить админом исполнителя /add_admin 'nick'",
reply_markup=nav.admin_panel)
@dp.message_handler(IsAdmin(), Text(NAV["AP_WITHDRAW_REQUESTS"]))
async def txtAdminWDRequests(message: Message):
requests = [x for x in WithdrawsDB.all_requests() if x["status"] == "WAITING"]
if len(requests) == 0:
await message.answer(text=MSG["NO_WD_REQUESTS"])
return
for reqest in requests:
u_data = UsersDB.getUser(reqest["sender_id"])
await message.answer(text=MSG["WAITING_WD_REQUEST"].format(
'@' + u_data["username"],
reqest["amount"],
u_data["withdraw_data"]["card"]
), reply_markup=nav.judge_wd(reqest["sender_id"], reqest["trans_id"]))
@dp.message_handler(IsAdmin(), Text(NAV["AP_REG_REQUESTS"]))
async def txtAdminRegRequests(message: Message):
requests = [x for x in UsersDB.allUsers() if ":WAIT_REGISTER:" in x["statuses"]]
for reqest in requests:
txt = MSG["NEW_CONTRA"].format(reqest["contra_nick"], reqest["description"], getLinks(reqest["soc_net_links"]))
reply = nav.judge_contra(reqest["tgid"])
if "photo" in reqest and reqest["photo"] != "NO_PHOTO":
await message.answer_photo(photo=reqest["photo"], caption=txt, reply_markup=reply)
else:
await message.answer(text=txt, reply_markup=reply)
if len(requests) == 0:
await message.answer(text=MSG["NO_REG_REQUESTS"])
@dp.message_handler(IsAdmin(), Text(NAV["CHANGE_GLOBAL_FEE"]))
async def txtAdminFee(message: Message):
await message.answer(text=f'Текущая комиссия: {os.environ["GLOBAL_FEE"]}%\n'
f'Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
@dp.message_handler(IsAdmin(), state=AdminPanel.Fee)
async def txtAdminFee(message: Message, state: FSMContext):
if message.text.replace('.', '', 1).isdigit() and 0 < float(message.text) < 100:
await message.answer(text=f'Установлен новый размер комиссии: {float(message.text)}%')
os.putenv("GLOBAL_FEE", message.text)
os.environ["GLOBAL_FEE"] = message.text
await state.finish()
else:
await message.answer(text=f'Некорректный ввод!')
await message.answer(text='Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
# ==================== CALLBACKS ================== #
@dp.callback_query_handler(lambda x: "JUDGE_CONTRA" in x.data)
async def judgeContra(cb: CallbackQuery):
_, action, user_id = cb.data.split(':')
user_username = UsersDB.get(user_id, "username")
txt = ""
if action == "ACCEPT":
UsersDB.update(user_id, "statuses", ":CONTRA:")
if UsersDB.get(user_id, "role") != Role.Admin.value:
UsersDB.update(user_id, "role", Role.User.value)
txt = f"✅ Заявка пользователя <b>{user_username}</b> одобрена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_CONFIRMED"],
reply_markup=nav.startMenu(user_id))
elif action == "REJECT":
txt = f"🚫 Заявка пользователя <b>{user_username}</b> отклонена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_REJECTED"],
reply_markup=nav.startMenu(user_id))
try:
await cb.message.edit_caption(caption=txt)
except:
await cb.message.edit_text(text=txt)
await cb.answer()
@dp.callback_query_handler(lambda x: "JUDGE_WD:" in x.data)
async def judgeWithdraw(cb: CallbackQuery):
_, action, user_id, trans = cb.data.split(':')
await cb.answer()
if action in ["WITHDRAW", "QIWIPAY"]:
wd = WithdrawsDB.get(int(trans))
if action == "QIWIPAY":
s = qiwiAutoPay(UsersDB.get(user_id, "withdraw_data")["card"], float(wd["amount"]))
try:
await cb.message.answer(text=MSG["AUTOPAY_INFO"].format(s["fields"]["account"],
s["sum"]["amount"],
s["transaction"]["id"]))
except:
await cb.message.answer(text='Ошибка при автопереводе: ' + str(s["message"]))
|
identifier_name
|
||
admin_panel.py
|
tgid"] in UsersDB.allAdminsId():
await message.answer(f'Пользователь {user["username"]} уже является админом!')
return
# Successful adding admin
UsersDB.update(user["tgid"], "role", Role.Admin.value)
await message.answer(f'@{user["username"]} Назначен Администратором!')
await bot.send_message(chat_id=user["tgid"],
text=f'Вы назначены Администратором!\n'
f'Ваш бог: @{message.from_user.username}')
@dp.message_handler(IsAdmin(), commands=[CMDS["CHANGE_FEE"]])
async def cmdChangeFee(message: Message, command):
if command and command.args:
nick = command.args.split()[0]
user = UsersDB.getUserByContraNick(nick)
# If username is not valid
if not user:
await message.answer(f'Пользователь {nick} не найден!')
return
# If wrong num of args
if len(command.args.split()) < 2:
await message.answer('Некорректный запрос')
return
fee = command.args.split()[1]
if fee == '-1':  # command.args are strings; '-1' resets to the global fee
fee = os.environ["GLOBAL_FEE"]
# If invalid fee
if not isValidFloat(fee):
await message.answer('Некорректный размер комиссии')
return
# Successful changed fee
UsersDB.update(user["tgid"], "custom_fee", fee)
await message.answer(f'У @{user["username"]} теперь комиссия составляет {fee}%!')
await bot.send_message(chat_id=user["tgid"],
text=f'Вам изменили комиссию до {fee}%!\n'
f'Ваш бог: @{message.from_user.username}')
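# Editorial sketch: isValidFloat() used above is defined elsewhere in the
# project and is not shown in this excerpt; a minimal version presumably looks
# like the helper below (named differently so it cannot clash with the real one).
def _is_valid_fee_sketch(value) -> bool:
    """Return True if `value` parses as a float strictly between 0 and 100."""
    try:
        return 0 < float(value) < 100
    except (TypeError, ValueError):
        return False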
# ==================== MESSAGES ================== #
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG"]))
async def txtBroadcast(message: Message):
await message.answer(text="⭐Выбери опцию кому отправить сообщение",
reply_markup=nav.broadcast_menu)
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_C"]))
async def txtBroadcastContra(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки исполнителям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="CONTRAS")
@dp.message_handler(IsAdmin(), Text(NAV["SEND_MSG_ALL"]))
async def txtBroadcastAll(message: Message):
await message.answer(text="⭐Отправь сообщение для рассылки всем пользователям",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
await dp.get_current().current_state().update_data(broadcast_opt="ALL")
@dp.message_handler(IsAdmin(), Text(NAV["QA_CONFIRM"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
opt = (await state.get_data())["broadcast_opt"]
broadcast_msg = (await state.get_data())["broadcast_msg"]
users = UsersDB.allContrasId() if opt == "CONTRAS" else \
UsersDB.allUsersId() if opt == "ALL" else []
# Send broadcasted
for user in users:
await bot.send_message(chat_id=user, text=MSG["BROADCAST"].format(broadcast_msg))
# Answer to god
await message.answer(text="Сообщение разослано!",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
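# Editorial sketch (not part of the original file): the loop above stops at the
# first user who has blocked the bot. Assuming aiogram 2.x, such failures raise
# aiogram.utils.exceptions.BotBlocked (or ChatNotFound for deleted chats) and a
# more fault-tolerant broadcast can simply skip them:
async def _broadcast_sketch(user_ids, text):
    from aiogram.utils import exceptions  # local import keeps the sketch self-contained
    sent = 0
    for uid in user_ids:
        try:
            await bot.send_message(chat_id=uid, text=text)
            sent += 1
        except (exceptions.BotBlocked, exceptions.ChatNotFound):
            continue  # user blocked the bot or the chat no longer exists
    return sent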
@dp.message_handler(IsAdmin(), Text(NAV["QA_CHANGE"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message):
await message.answer(text="Введите сообщение заново:",
reply_markup=ReplyKeyboardRemove())
await AdminPanel.Broadcast.set()
@dp.message_handler(IsAdmin(), Text(NAV["QA_CANCEL"]), state=AdminPanel.Broadcast)
async def stateBroadcastContras(message: Message, state: FSMContext):
await message.answer(text="🚫 Рассылка отменена",
reply_markup=nav.startMenu(message.from_user.id))
await state.finish()
@dp.message_handler(IsAdmin(), state=AdminPanel.Broadcast, content_types=[ContentType.ANY])
async def stateBroadcastAll(message: Message, state: FSMContext):
q = str(message.text).replace('<', '').replace('>', '')
await state.update_data(broadcast_msg=q)
|
await message.answer(text="⭐Вот превью рассылки\n"
"———————————————\n"
f"{q}\n"
"———————————————\n",
reply_markup=nav.confirm_menu)
@dp.message_handler(IsAdmin(), Text(NAV["BACK"]))
async def txtBackToStart(message: Message):
await message.answer(text="⭐🌟⭐🌟⭐🌟⭐🌟", reply_markup=nav.startMenu(message.from_user.id))
@dp.message_handler(IsAdmin(), Text(NAV["ADMIN_P"]))
async def txtAdminPanel(message: Message):
await message.answer(text="⭐🌟⭐🌟 ADMIN PANEL⭐🌟⭐🌟\n"
"Изменить комиссию исполнителю - /change_fee 'nick' 'new fee'\n"
"Назначить админом исполнителя /add_admin 'nick'",
reply_markup=nav.admin_panel)
@dp.message_handler(IsAdmin(), Text(NAV["AP_WITHDRAW_REQUESTS"]))
async def txtAdminWDRequests(message: Message):
requests = [x for x in WithdrawsDB.all_requests() if x["status"] == "WAITING"]
if len(requests) == 0:
await message.answer(text=MSG["NO_WD_REQUESTS"])
return
for reqest in requests:
u_data = UsersDB.getUser(reqest["sender_id"])
await message.answer(text=MSG["WAITING_WD_REQUEST"].format(
'@' + u_data["username"],
reqest["amount"],
u_data["withdraw_data"]["card"]
), reply_markup=nav.judge_wd(reqest["sender_id"], reqest["trans_id"]))
@dp.message_handler(IsAdmin(), Text(NAV["AP_REG_REQUESTS"]))
async def txtAdminRegRequests(message: Message):
requests = [x for x in UsersDB.allUsers() if ":WAIT_REGISTER:" in x["statuses"]]
for reqest in requests:
txt = MSG["NEW_CONTRA"].format(reqest["contra_nick"], reqest["description"], getLinks(reqest["soc_net_links"]))
reply = nav.judge_contra(reqest["tgid"])
if "photo" in reqest and reqest["photo"] != "NO_PHOTO":
await message.answer_photo(photo=reqest["photo"], caption=txt, reply_markup=reply)
else:
await message.answer(text=txt, reply_markup=reply)
if len(requests) == 0:
await message.answer(text=MSG["NO_REG_REQUESTS"])
@dp.message_handler(IsAdmin(), Text(NAV["CHANGE_GLOBAL_FEE"]))
async def txtAdminFee(message: Message):
await message.answer(text=f'Текущая комиссия: {os.environ["GLOBAL_FEE"]}%\n'
f'Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
@dp.message_handler(IsAdmin(), state=AdminPanel.Fee)
async def stateAdminFee(message: Message, state: FSMContext):
if message.text.replace('.', '', 1).isdigit() and 0 < float(message.text) < 100:
await message.answer(text=f'Установлен новый размер комиссии: {float(message.text)}%')
os.putenv("GLOBAL_FEE", message.text)
os.environ["GLOBAL_FEE"] = message.text
await state.finish()
else:
await message.answer(text=f'Некорректный ввод!')
await message.answer(text='Введите новый размер комиссии: 1-99')
await AdminPanel.Fee.set()
# ==================== CALLBACKS ================== #
@dp.callback_query_handler(lambda x: "JUDGE_CONTRA" in x.data)
async def judgeContra(cb: CallbackQuery):
_, action, user_id = cb.data.split(':')
user_username = UsersDB.get(user_id, "username")
txt = ""
if action == "ACCEPT":
UsersDB.update(user_id, "statuses", ":CONTRA:")
if UsersDB.get(user_id, "role") != Role.Admin.value:
UsersDB.update(user_id, "role", Role.User.value)
txt = f"✅ Заявка пользователя <b>{user_username}</b> одобрена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_CONFIRMED"],
reply_markup=nav.startMenu(user_id))
elif action == "REJECT":
txt = f"🚫 Заявка пользователя <b>{user_username}</b> отклонена"
await bot.send_message(chat_id=user_id, text=MSG["YOUR_REG_REJECTED"],
reply_markup=nav.startMenu(user_id))
try:
await cb.message.edit_caption(caption=txt)
except:
await cb.message.edit_text(text=txt)
await cb.answer()
@dp.callback_query_handler(lambda x: "JUDGE_WD:" in x.data)
async def judgeWithdraw(cb: CallbackQuery):
_, action, user_id, trans = cb.data.split(':')
await cb.answer()
if action in ["WITHDRAW", "
|
random_line_split
|
|
dg_terraria.py
|
2, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(nn.weight_norm(ll.NINLayer(disc_layers[-1], num_units=192, W=Normal(0.05), nonlinearity=nn.lrelu)))
disc_layers.append(ll.GlobalPoolLayer(disc_layers[-1]))
disc_layers.append(nn.MinibatchLayer(disc_layers[-1], num_kernels=100))
# Number of units in the last layer should match the number of classes.
disc_layers.append(
nn.weight_norm(ll.DenseLayer(disc_layers[-1], num_units=num_disc_class_units, W=Normal(0.05), nonlinearity=None),
train_g=True, init_stdv=0.1))
print("Finished constructing discriminator.")
#Cost functions.
print("Setting up cost functions...")
labels = T.ivector()
x_lab = T.tensor4()
x_unl = T.tensor4()
temp = ll.get_output(gen_layers[-1], deterministic=False, init=True)
temp = ll.get_output(disc_layers[-1], x_lab, deterministic=False, init=True)
init_updates = [u for l in gen_layers + disc_layers for u in getattr(l, 'init_updates', [])]
output_before_softmax_lab = ll.get_output(disc_layers[-1], x_lab, deterministic=False)
output_before_softmax_unl = ll.get_output(disc_layers[-1], x_unl, deterministic=False)
output_before_softmax_gen = ll.get_output(disc_layers[-1], gen_dat, deterministic=False)
sig1 = gen_layers[1].get_sig() # Comment this line for training/testing baseline GAN models
sigloss = T.mean((1 - sig1) * (1 - sig1)) * .05
l_lab = output_before_softmax_lab[T.arange(args.batch_size), labels]
l_unl = nn.log_sum_exp(output_before_softmax_unl)
l_gen = nn.log_sum_exp(output_before_softmax_gen)
loss_lab = -T.mean(l_lab) + T.mean(T.mean(nn.log_sum_exp(output_before_softmax_lab)))
loss_unl = -0.5 * T.mean(l_unl) + 0.5 * T.mean(T.nnet.softplus(l_unl)) + 0.5 * T.mean(T.nnet.softplus(l_gen))
loss_gen = -T.mean(T.nnet.softplus(l_gen))
train_err = T.mean(T.neq(T.argmax(output_before_softmax_lab, axis=1), labels))
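# Editorial note: the objectives above follow the semi-supervised GAN setup of
# Salimans et al., "Improved Techniques for Training GANs":
#   loss_lab - supervised cross-entropy on labeled data, written as
#              -E[logit of the true class] + E[logsumexp over class logits]
#   loss_unl - unlabeled term that pushes the real/fake score (logsumexp of the
#              class logits) up on real unlabeled data and down on samples
#   loss_gen - generator objective that raises the discriminator's score on
#              generated samples
#   sigloss  - auxiliary penalty pulling the generator's sigma gate toward 1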
#Error.
output_before_softmax = ll.get_output(disc_layers[-1], x_lab, deterministic=True)
test_err = T.mean(T.neq(T.argmax(output_before_softmax, axis=1), labels))
print("Finished setting up cost functions.")
#Training set-up.
if tdg_train:
print("Setting up Theano training...")
lr = T.scalar()
disc_params = ll.get_all_params(disc_layers, trainable=True)
disc_param_updates = nn.adam_updates(disc_params, loss_lab + args.unlabeled_weight * loss_unl, lr=lr, mom1=0.5)
disc_param_avg = [th.shared(np.cast[th.config.floatX](0. * p.get_value())) for p in disc_params]
print("Set up discriminator parameters...")
disc_avg_updates = [(a, a + 0.0001 * (p - a)) for p, a in zip(disc_params, disc_param_avg)]
print("Set up discriminator updates...")
disc_avg_givens = [(p, a) for p, a in zip(disc_params, disc_param_avg)]
print("Set up discriminator averages...")
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates)
print("Initialized discriminator updates...")
train_batch_disc = th.function(inputs=[x_lab, labels, x_unl, lr], outputs=[loss_lab, loss_unl, train_err],
updates=disc_param_updates + disc_avg_updates)
print("Initialized discriminator training batch...")
test_batch = th.function(inputs=[x_lab, labels], outputs=test_err, givens=disc_avg_givens)
print("Initialized discriminator test batch...")
samplefun = th.function(inputs=[], outputs=gen_dat)
print("Set up Theano training for discriminator. Setting up for generator...")
# Theano functions for training the gen net
loss_gen = -T.mean(T.nnet.softplus(l_gen))
gen_params = ll.get_all_params(gen_layers[-1], trainable=True)
gen_param_updates = nn.adam_updates(gen_params, loss_gen, lr=lr, mom1=0.5)
print("Set up generator parameters...")
train_batch_gen = th.function(inputs=[lr], outputs=[sig1, sigloss, loss_gen], updates=gen_param_updates)
print("Set up Theano training functions.")
# Generating GAN samples from a previously trained model.
if tdg_load:
print("Setting up Theano...")
disc_params = ll.get_all_params(disc_layers, trainable=True)
print("Set up discriminator parameters...")
samplefun = th.function(inputs=[], outputs=gen_dat)
print("Set up sampling function...")
gen_params = ll.get_all_params(gen_layers[-1], trainable=True)
print("Set up generator parameters...")
#f = np.load(args.load_dir + load_disc_param + '1180.npz')
#param_values = [f['arr_%d' % i] for i in range(len(f.files))]
#for i, p in enumerate(disc_params):
# p.set_value(param_values[i])
#print("Loaded discriminator parameters.")
for fepoch in param_flist:
f = np.load(args.load_dir + load_gen_param + fepoch + '.npz')
param_dict[fepoch] = [f['arr_%d' % i] for i in range(len(f.files))]
for i, p in enumerate(gen_params):
p.set_value(param_dict[fepoch][i])
print("Loaded generator parameters.")
noise = theano_rng.normal(size=noise_dim)
sample_input = th.tensor.zeros(noise_dim)
print("Setting up sampling...")
gen_layers[0].input_var = sample_input
gen_dat = ll.get_output(gen_layers[-1], deterministic=False)
samplefun = th.function(inputs=[], outputs=gen_dat)
print("Generating samples...")
sample_x = samplefun()
img_bhwc = np.transpose(sample_x[:100, ], (0, 2, 3, 1))
img_tile = plotting.img_tile(img_bhwc, aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title='Terraria samples')
plotting.plt.savefig(load_results_dir + '/dg_terraria_sample_minibatch_sigma_rows_all_' + fepoch + '.png')
print("Saved samples to ", load_results_dir)
sys.exit()
inds = rng.permutation(trainx.shape[0])
trainx = trainx[inds]
trainy = trainy[inds]
# Uncomment this block when training on the entire dataset
'''
txs = []
tys = []
for j in range(10):
txs.append(trainx[trainy==j][:args.count])
tys.append(trainy[trainy==j][:args.count])
txs = np.concatenate(txs, axis=0)
tys = np.concatenate(tys, axis=0)
'''
#(Block End)
a = []
#Training.
print("Starting training...")
for epoch in range(1200):
begin = time.time()
lr = np.cast[th.config.floatX](args.learning_rate * np.minimum(3. - epoch / 400., 1.))
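# Editorial note: this schedule keeps lr at args.learning_rate for the first
# 800 epochs (3 - epoch/400 >= 1 there) and then decays it linearly, reaching
# 0 at epoch 1200.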
# Uncomment this block when training on the entire dataset
'''
# construct randomly permuted minibatches
trainx = []
trainy = []
for t in range(int(np.ceil(trainx_unl.shape[0]/float(txs.shape[0])))):
inds = rng.permutation(txs.shape[0])
trainx.append(txs[inds])
trainy.append(tys[inds])
trainx = np.concatenate(trainx, axis=0)
trainy = np.concatenate(trainy, axis=0)
trainx_unl = trainx_unl[rng.permutation(trainx_unl.shape[0])]
'''
#(Block End)
if epoch == 0:
init_param(trainx[:500]) # data based initialization
# train
loss_lab = 0.
loss_unl = 0.
train_err = 0.
for t in range(nr_batches_train):
# Renamed from `ll, lu, te` so these per-batch values do not shadow the
# lasagne.layers alias `ll` used elsewhere in this script.
lab_batch, unl_batch, err_batch = train_batch_disc(trainx[t * args.batch_size:(t + 1) * args.batch_size],
trainy[t * args.batch_size:(t + 1) * args.batch_size],
trainx_unl[t * args.batch_size:(t + 1) * args.batch_size], lr)
loss_lab += lab_batch
loss_unl += unl_batch
train_err += err_batch
for rep in range(3):
sigm, sigmloss, genloss = train_batch_gen(lr)
loss_lab /= nr_batches_train
loss_unl /= nr_batches_train
train_err /= nr_batches_train
# test
test_err = 0.
for t in range(nr_batches_test):
test_err += test_batch(testx[t * args.batch_size:(t + 1) * args.batch_size],
testy[t * args.batch_size:(t + 1) * args.batch_size])
test_err /= nr_batches_test
# report
print(
|
random_line_split
|
||
dg_terraria.py
|
= T.mean(T.neq(T.argmax(output_before_softmax_lab, axis=1), labels))
#Error.
output_before_softmax = ll.get_output(disc_layers[-1], x_lab, deterministic=True)
test_err = T.mean(T.neq(T.argmax(output_before_softmax, axis=1), labels))
print("Finished setting up cost functions.")
#Training set-up.
if tdg_train:
print("Setting up Theano training...")
lr = T.scalar()
disc_params = ll.get_all_params(disc_layers, trainable=True)
disc_param_updates = nn.adam_updates(disc_params, loss_lab + args.unlabeled_weight * loss_unl, lr=lr, mom1=0.5)
disc_param_avg = [th.shared(np.cast[th.config.floatX](0. * p.get_value())) for p in disc_params]
print("Set up discriminator parameters...")
disc_avg_updates = [(a, a + 0.0001 * (p - a)) for p, a in zip(disc_params, disc_param_avg)]
print("Set up discriminator updates...")
disc_avg_givens = [(p, a) for p, a in zip(disc_params, disc_param_avg)]
print("Set up discriminator averages...")
init_param = th.function(inputs=[x_lab], outputs=None, updates=init_updates)
print("Initialized discriminator updates...")
train_batch_disc = th.function(inputs=[x_lab, labels, x_unl, lr], outputs=[loss_lab, loss_unl, train_err],
updates=disc_param_updates + disc_avg_updates)
print("Initialized discriminator training batch...")
test_batch = th.function(inputs=[x_lab, labels], outputs=test_err, givens=disc_avg_givens)
print("Initialized discriminator test batch...")
samplefun = th.function(inputs=[], outputs=gen_dat)
print("Set up Theano training for discriminator. Setting up for generator...")
# Theano functions for training the gen net
loss_gen = -T.mean(T.nnet.softplus(l_gen))
gen_params = ll.get_all_params(gen_layers[-1], trainable=True)
gen_param_updates = nn.adam_updates(gen_params, loss_gen, lr=lr, mom1=0.5)
print("Set up generator parameters...")
train_batch_gen = th.function(inputs=[lr], outputs=[sig1, sigloss, loss_gen], updates=gen_param_updates)
print("Set up Theano training functions.")
# Generating GAN samples from a previously trained model.
if tdg_load:
print("Setting up Theano...")
disc_params = ll.get_all_params(disc_layers, trainable=True)
print("Set up discriminator parameters...")
samplefun = th.function(inputs=[], outputs=gen_dat)
print("Set up sampling function...")
gen_params = ll.get_all_params(gen_layers[-1], trainable=True)
print("Set up generator parameters...")
#f = np.load(args.load_dir + load_disc_param + '1180.npz')
#param_values = [f['arr_%d' % i] for i in range(len(f.files))]
#for i, p in enumerate(disc_params):
# p.set_value(param_values[i])
#print("Loaded discriminator parameters.")
for fepoch in param_flist:
f = np.load(args.load_dir + load_gen_param + fepoch + '.npz')
param_dict[fepoch] = [f['arr_%d' % i] for i in range(len(f.files))]
for i, p in enumerate(gen_params):
p.set_value(param_dict[fepoch][i])
print("Loaded generator parameters.")
noise = theano_rng.normal(size=noise_dim)
sample_input = th.tensor.zeros(noise_dim)
print("Setting up sampling...")
gen_layers[0].input_var = sample_input
gen_dat = ll.get_output(gen_layers[-1], deterministic=False)
samplefun = th.function(inputs=[], outputs=gen_dat)
print("Generating samples...")
sample_x = samplefun()
img_bhwc = np.transpose(sample_x[:100, ], (0, 2, 3, 1))
img_tile = plotting.img_tile(img_bhwc, aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title='Terraria samples')
plotting.plt.savefig(load_results_dir + '/dg_terraria_sample_minibatch_sigma_rows_all_' + fepoch + '.png')
print("Saved samples to ", load_results_dir)
sys.exit()
inds = rng.permutation(trainx.shape[0])
trainx = trainx[inds]
trainy = trainy[inds]
# Uncomment this block when training on the entire dataset
'''
txs = []
tys = []
for j in range(10):
txs.append(trainx[trainy==j][:args.count])
tys.append(trainy[trainy==j][:args.count])
txs = np.concatenate(txs, axis=0)
tys = np.concatenate(tys, axis=0)
'''
#(Block End)
a = []
#Training.
print("Starting training...")
for epoch in range(1200):
begin = time.time()
lr = np.cast[th.config.floatX](args.learning_rate * np.minimum(3. - epoch / 400., 1.))
# Uncomment this block when training on the entire dataset
'''
# construct randomly permuted minibatches
trainx = []
trainy = []
for t in range(int(np.ceil(trainx_unl.shape[0]/float(txs.shape[0])))):
inds = rng.permutation(txs.shape[0])
trainx.append(txs[inds])
trainy.append(tys[inds])
trainx = np.concatenate(trainx, axis=0)
trainy = np.concatenate(trainy, axis=0)
trainx_unl = trainx_unl[rng.permutation(trainx_unl.shape[0])]
'''
#(Block End)
if epoch == 0:
init_param(trainx[:500]) # data based initialization
# train
loss_lab = 0.
loss_unl = 0.
train_err = 0.
for t in range(nr_batches_train):
# Renamed from `ll, lu, te` so these per-batch values do not shadow the
# lasagne.layers alias `ll` used elsewhere in this script.
lab_batch, unl_batch, err_batch = train_batch_disc(trainx[t * args.batch_size:(t + 1) * args.batch_size],
trainy[t * args.batch_size:(t + 1) * args.batch_size],
trainx_unl[t * args.batch_size:(t + 1) * args.batch_size], lr)
loss_lab += lab_batch
loss_unl += unl_batch
train_err += err_batch
for rep in range(3):
sigm, sigmloss, genloss = train_batch_gen(lr)
loss_lab /= nr_batches_train
loss_unl /= nr_batches_train
train_err /= nr_batches_train
# test
test_err = 0.
for t in range(nr_batches_test):
test_err += test_batch(testx[t * args.batch_size:(t + 1) * args.batch_size],
testy[t * args.batch_size:(t + 1) * args.batch_size])
test_err /= nr_batches_test
# report
print(
"Iteration %d, time = %ds, loss_lab = %.4f, loss_unl = %.4f, train err= %.4f, test err = %.4f, gen_loss = %.4f, sigloss = %.4f" % (
epoch, time.time() - begin, loss_lab, loss_unl, train_err, test_err, genloss, sigmloss))
sys.stdout.flush()
a.append([epoch, loss_lab, loss_unl, train_err, test_err, genloss, sigmloss])
# generate samples from the model
sample_x = samplefun()
img_bhwc = np.transpose(sample_x[:100, ], (0, 2, 3, 1))
img_tile = plotting.img_tile(img_bhwc, aspect_ratio=1.0, border_color=1.0, stretch=True)
img = plotting.plot_img(img_tile, title='Terraria samples')
plotting.plt.savefig(args.results_dir + '/dg_terraria_sample_minibatch.png')
if epoch % 20 == 0:
|
NNdiff = np.sum(
np.sum(np.sum(np.square(np.expand_dims(sample_x, axis=1) - np.expand_dims(trainx, axis=0)), axis=2),
axis=2), axis=2)
NN = trainx[np.argmin(NNdiff, axis=1)]
NN = np.transpose(NN[:100], (0, 2, 3, 1))
NN_tile = plotting.img_tile(NN, aspect_ratio=1.0, border_color=1.0, stretch=True)
img_tile = np.concatenate((img_tile, NN_tile), axis=1)
img = plotting.plot_img(img_tile, title='Terraria samples')
plotting.plt.savefig(args.results_dir + '/' + str(epoch) + '.png')
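# Editorial note: the block above pairs each generated sample with its nearest
# training image (by squared pixel distance) and saves them side by side every
# 20 epochs, a quick visual check that the generator is not simply memorizing
# the training set.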
# save params
np.savez(args.results_dir + '/disc_params' + str(epoch) + '.npz', *[p.get_value() for p in disc_params])
np.savez(args.results_dir + '/gen_params' + str(epoch) + '.npz', *[p.get_value() for p in gen_params])
np.save(args.results_dir + '/train/errors.npy', a)
np.save(args.results_dir + '/train/sig.npy', sigm)
|
conditional_block
|
|
builtin_fn_misc.go
|
// ```elvish-transcript
// ~> var f = (constantly lorem ipsum)
// ~> $f
// ▶ lorem
// ▶ ipsum
// ```
//
// The above example is equivalent to simply `var f = { put lorem ipsum }`;
// it is most useful when the argument is **not** a literal value, e.g.
//
// ```elvish-transcript
// ~> var f = (constantly (uname))
// ~> $f
// ▶ Darwin
// ~> $f
// ▶ Darwin
// ```
//
// The above code only calls `uname` once when defining `$f`. In contrast, if
// `$f` is defined as `var f = { put (uname) }`, every time you invoke `$f`,
// `uname` will be called.
//
// Etymology: [Clojure](https://clojuredocs.org/clojure.core/constantly).
func constantly(args ...interface{}) Callable {
// TODO(xiaq): Repr of this function is not right.
return NewGoFn(
"created by constantly",
func(fm *Frame) error {
out := fm.ValueOutput()
for _, v := range args {
err := out.Put(v)
if err != nil {
return err
}
}
return nil
},
)
}
//elvdoc:fn call
//
// ```elvish
// call $fn $args $opts
// ```
//
// Calls `$fn` with `$args` as the arguments and `$opts` as the options. Useful
// for calling a function with dynamic option keys.
//
// Example:
//
// ```elvish-transcript
// ~> var f = {|a &k1=v1 &k2=v2| put $a $k1 $k2 }
// ~> call $f [foo] [&k1=bar]
// ▶ foo
// ▶ bar
// ▶ v2
// ```
func call(fm *Frame, fn Callable, argsVal vals.List, optsVal vals.Map) error {
args := make([]interface{}, 0, argsVal.Len())
for it := argsVal.Iterator(); it.HasElem(); it.Next() {
args = append(args, it.Elem())
}
opts := make(map[string]interface{}, optsVal.Len())
for it := optsVal.Iterator(); it.HasElem(); it.Next() {
k, v := it.Elem()
ks, ok := k.(string)
if !ok {
return errs.BadValue{What: "option key",
Valid: "string", Actual: vals.Kind(k)}
}
opts[ks] = v
}
return fn.Call(fm.Fork("-call"), args, opts)
}
//elvdoc:fn resolve
//
// ```elvish
// resolve $command
// ```
//
// Output what `$command` resolves to in symbolic form. Command resolution is
// described in the [language reference](language.html#ordinary-command).
//
// Example:
//
// ```elvish-transcript
// ~> resolve echo
// ▶ <builtin echo>
// ~> fn f { }
// ~> resolve f
// ▶ <closure 0xc4201c24d0>
// ~> resolve cat
// ▶ <external cat>
// ```
func resolve(fm *Frame, head string) string {
special, fnRef := resolveCmdHeadInternally(fm, head, nil)
switch {
case special != nil:
return "special"
case fnRef != nil:
return "$" + head + FnSuffix
default:
return "(external " + parse.Quote(head) + ")"
}
}
//elvdoc:fn eval
//
// ```elvish
// eval $code &ns=$nil &on-end=$nil
// ```
//
// Evaluates `$code`, which should be a string. The evaluation happens in a
// new, restricted namespace, whose initial set of variables can be specified by
// the `&ns` option. After evaluation completes, the new namespace is passed to
// the callback specified by `&on-end` if it is not nil.
//
// The namespace specified by `&ns` is never modified; it will not be affected
// by the creation or deletion of variables by `$code`. However, the values of
// the variables may be mutated by `$code`.
//
// If the `&ns` option is `$nil` (the default), a temporary namespace built by
// amalgamating the local and upvalue scopes of the caller is used.
//
// If `$code` fails to parse or compile, the parse error or compilation error is
// raised as an exception.
//
// Basic examples that do not modify the namespace or any variable:
//
// ```elvish-transcript
// ~> eval 'put x'
// ▶ x
// ~> var x = foo
// ~> eval 'put $x'
// ▶ foo
// ~> var ns = (ns [&x=bar])
// ~> eval &ns=$ns 'put $x'
// ▶ bar
// ```
//
// Examples that modify existing variables:
//
// ```elvish-transcript
// ~> var y = foo
// ~> eval 'set y = bar'
// ~> put $y
// ▶ bar
// ```
//
// Examples that create new variables and use the callback to access them:
//
// ```elvish-transcript
// ~> eval 'var z = lorem'
// ~> put $z
// compilation error: variable $z not found
// [tty 2], line 1: put $z
// ~> var saved-ns = $nil
// ~> eval &on-end={|ns| set saved-ns = $ns } 'var z = lorem'
// ~> put $saved-ns[z]
// ▶ lorem
// ```
//
// Note that when using variables from an outer scope, only those
// that have been referenced are captured as upvalues (see [closure
// semantics](language.html#closure-semantics)) and thus accessible to `eval`:
//
// ```elvish-transcript
// ~> var a b
// ~> fn f {|code| nop $a; eval $code }
// ~> f 'echo $a'
// $nil
// ~> f 'echo $b'
// Exception: compilation error: variable $b not found
// [eval 2], line 1: echo $b
// Traceback: [... omitted ...]
// ```
type evalOpts struct {
Ns *Ns
OnEnd Callable
}
func (*evalOpts) SetDefaultOptions() {}
func eval(fm *Frame, opts evalOpts, code string) error {
src := parse.Source{Name: fmt.Sprintf("[eval %d]", nextEvalCount()), Code: code}
ns := opts.Ns
if ns == nil {
ns = CombineNs(fm.up, fm.local)
}
// The stacktrace already contains the line that calls "eval", so we pass
// nil as the second argument.
newNs, exc := fm.Eval(src, nil, ns)
if opts.OnEnd != nil {
newFm := fm.Fork("on-end callbac
|
unique names for each source passed to eval.
var (
evalCount int
evalCountMutex sync.Mutex
)
func nextEvalCount() int {
evalCountMutex.Lock()
defer evalCountMutex.Unlock()
evalCount++
return evalCount
}
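// Editorial note: the counter is mutex-protected because eval may be invoked
// from concurrently running Elvish code, and each evaluated source presumably
// needs a distinct "[eval N]" name in error messages and stack traces.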
//elvdoc:fn use-mod
//
// ```elvish
// use-mod $use-spec
// ```
//
// Imports a module, and outputs the namespace for the module.
//
// Most code should use the [use](language.html#importing-modules-with-use)
// special command instead.
//
// Examples:
//
// ```elvish-transcript
// ~> echo 'var x = value' > a.elv
// ~> put (use-mod ./a)[x]
// ▶ value
// ```
func useMod(fm *Frame, spec string) (*Ns, error) {
return use(fm, spec, nil)
}
func readFileUTF8(fname string) (string, error) {
bytes, err := os.ReadFile(fname)
if err != nil {
return "", err
}
if !utf8.Valid(bytes) {
return "", fmt.Errorf("%s: source is not valid UTF-8", fname)
}
return string(bytes), nil
}
//elvdoc:fn deprecate
//
// ```elvish
// deprecate $msg
// ```
//
// Shows the given deprecation message to stderr. If called from a function
// or module, also shows the call site of the function or import site of the
// module. Does nothing if the combination of the call site and the message has
// been shown before.
//
// ```elvish-transcript
// ~> deprecate msg
// deprecation: msg
// ~> fn f { deprecate msg }
// ~> f
// deprecation: msg
// [tty 19], line 1: f
// ~> exec
// ~> deprecate msg
// deprecation: msg
// ~> fn f { deprecate msg }
// ~> f
// deprecation: msg
// [tty 3], line 1: f
// ~> f # a different call site; shows deprecate message
// deprecation: msg
// [tty 4], line 1: f
// ~> fn g { f }
//
|
k of eval")
errCb := opts.OnEnd.Call(newFm, []interface{}{newNs}, NoOpts)
if exc == nil {
return errCb
}
}
return exc
}
// Used to generate
|
conditional_block
|
builtin_fn_misc.go
|
// ```elvish-transcript
// ~> var f = (constantly lorem ipsum)
// ~> $f
// ▶ lorem
// ▶ ipsum
// ```
//
// The above example is equivalent to simply `var f = { put lorem ipsum }`;
// it is most useful when the argument is **not** a literal value, e.g.
//
// ```elvish-transcript
// ~> var f = (constantly (uname))
// ~> $f
// ▶ Darwin
// ~> $f
// ▶ Darwin
// ```
//
// The above code only calls `uname` once when defining `$f`. In contrast, if
// `$f` is defined as `var f = { put (uname) }`, every time you invoke `$f`,
// `uname` will be called.
//
// Etymology: [Clojure](https://clojuredocs.org/clojure.core/constantly).
func constantly(args ...interface{}) Callable {
// TODO(xiaq): Repr of this function is not right.
return NewGoFn(
"created by constantly",
func(fm *Frame) error {
out := fm.ValueOutput()
for _, v := range args {
err := out.Put(v)
if err != nil {
return err
}
}
return nil
},
)
}
//elvdoc:fn call
//
// ```elvish
// call $fn $args $opts
// ```
//
// Calls `$fn` with `$args` as the arguments and `$opts` as the options. Useful
// for calling a function with dynamic option keys.
//
// Example:
//
// ```elvish-transcript
// ~> var f = {|a &k1=v1 &k2=v2| put $a $k1 $k2 }
// ~> call $f [foo] [&k1=bar]
// ▶ foo
// ▶ bar
// ▶ v2
// ```
func call(fm *Frame, fn Callable, argsVal vals.List, optsVal vals.Map) error {
args := make([]interface{}, 0, argsVal.Len())
for it := argsVal.Iterator(); it.HasElem(); it.Next() {
args = append(args, it.Elem())
}
opts := make(map[string]interface{}, optsVal.Len())
for it := optsVal.Iterator(); it.HasElem(); it.Next() {
k, v := it.Elem()
ks, ok := k.(string)
if !ok {
return errs.BadValue{What: "option key",
Valid: "string", Actual: vals.Kind(k)}
}
opts[ks] = v
}
return fn.Call(fm.Fork("-call"), args, opts)
}
//elvdoc:fn resolve
//
// ```elvish
// resolve $command
// ```
//
// Output what `$command` resolves to in symbolic form. Command resolution is
// described in the [language reference](language.html#ordinary-command).
//
// Example:
//
// ```elvish-transcript
// ~> resolve echo
// ▶ <builtin echo>
// ~> fn f { }
// ~> resolve f
// ▶ <closure 0xc4201c24d0>
// ~> resolve cat
// ▶ <external cat>
// ```
func resolve(fm *Frame, head string) string {
special, fnRef := resolveCmdHeadInternally(fm, head, nil)
switch {
case special != nil:
return "special"
case fnRef != nil:
return "$" + head + FnSuffix
default:
return "(external " + parse.Quote(head) + ")"
}
}
//elvdoc:fn eval
//
// ```elvish
// eval $code &ns=$nil &on-end=$nil
// ```
//
// Evaluates `$code`, which should be a string. The evaluation happens in a
// new, restricted namespace, whose initial set of variables can be specified by
// the `&ns` option. After evaluation completes, the new namespace is passed to
// the callback specified by `&on-end` if it is not nil.
//
// The namespace specified by `&ns` is never modified; it will not be affected
// by the creation or deletion of variables by `$code`. However, the values of
// the variables may be mutated by `$code`.
//
// If the `&ns` option is `$nil` (the default), a temporary namespace built by
// amalgamating the local and upvalue scopes of the caller is used.
//
// If `$code` fails to parse or compile, the parse error or compilation error is
// raised as an exception.
//
// Basic examples that do not modify the namespace or any variable:
//
// ```elvish-transcript
// ~> eval 'put x'
// ▶ x
// ~> var x = foo
// ~> eval 'put $x'
// ▶ foo
// ~> var ns = (ns [&x=bar])
// ~> eval &ns=$ns 'put $x'
// ▶ bar
// ```
//
// Examples that modify existing variables:
//
// ```elvish-transcript
// ~> var y = foo
// ~> eval 'set y = bar'
// ~> put $y
// ▶ bar
// ```
//
// Examples that create new variables and use the callback to access them:
//
// ```elvish-transcript
// ~> eval 'var z = lorem'
// ~> put $z
// compilation error: variable $z not found
// [tty 2], line 1: put $z
// ~> var saved-ns = $nil
// ~> eval &on-end={|ns| set saved-ns = $ns } 'var z = lorem'
// ~> put $saved-ns[z]
// ▶ lorem
// ```
//
// Note that when using variables from an outer scope, only those
// that have been referenced are captured as upvalues (see [closure
// semantics](language.html#closure-semantics)) and thus accessible to `eval`:
//
// ```elvish-transcript
// ~> var a b
// ~> fn f {|code| nop $a; eval $code }
// ~> f 'echo $a'
// $nil
// ~> f 'echo $b'
// Exception: compilation error: variable $b not found
// [eval 2], line 1: echo $b
// Traceback: [... omitted ...]
// ```
type evalOpts struct {
Ns *Ns
OnEnd Callable
}
func (*evalOpts) SetDefaultOptions() {}
func eval(fm
|
lOpts, code string) error {
src := parse.Source{Name: fmt.Sprintf("[eval %d]", nextEvalCount()), Code: code}
ns := opts.Ns
if ns == nil {
ns = CombineNs(fm.up, fm.local)
}
// The stacktrace already contains the line that calls "eval", so we pass
// nil as the second argument.
newNs, exc := fm.Eval(src, nil, ns)
if opts.OnEnd != nil {
newFm := fm.Fork("on-end callback of eval")
errCb := opts.OnEnd.Call(newFm, []interface{}{newNs}, NoOpts)
if exc == nil {
return errCb
}
}
return exc
}
// Used to generate unique names for each source passed to eval.
var (
evalCount int
evalCountMutex sync.Mutex
)
func nextEvalCount() int {
evalCountMutex.Lock()
defer evalCountMutex.Unlock()
evalCount++
return evalCount
}
//elvdoc:fn use-mod
//
// ```elvish
// use-mod $use-spec
// ```
//
// Imports a module, and outputs the namespace for the module.
//
// Most code should use the [use](language.html#importing-modules-with-use)
// special command instead.
//
// Examples:
//
// ```elvish-transcript
// ~> echo 'var x = value' > a.elv
// ~> put (use-mod ./a)[x]
// ▶ value
// ```
func useMod(fm *Frame, spec string) (*Ns, error) {
return use(fm, spec, nil)
}
func readFileUTF8(fname string) (string, error) {
bytes, err := os.ReadFile(fname)
if err != nil {
return "", err
}
if !utf8.Valid(bytes) {
return "", fmt.Errorf("%s: source is not valid UTF-8", fname)
}
return string(bytes), nil
}
//elvdoc:fn deprecate
//
// ```elvish
// deprecate $msg
// ```
//
// Shows the given deprecation message to stderr. If called from a function
// or module, also shows the call site of the function or import site of the
// module. Does nothing if the combination of the call site and the message has
// been shown before.
//
// ```elvish-transcript
// ~> deprecate msg
// deprecation: msg
// ~> fn f { deprecate msg }
// ~> f
// deprecation: msg
// [tty 19], line 1: f
// ~> exec
// ~> deprecate msg
// deprecation: msg
// ~> fn f { deprecate msg }
// ~> f
// deprecation: msg
// [tty 3], line 1: f
// ~> f # a different call site; shows deprecate message
// deprecation: msg
// [tty 4], line 1: f
// ~> fn g { f }
// ~>
|
*Frame, opts eva
|
identifier_name
|
builtin_fn_misc.go
|
// For rand and randint.
rand.Seed(time.Now().UTC().UnixNano())
}
//elvdoc:fn nop
//
// ```elvish
// nop &any-opt= $value...
// ```
//
// Accepts arbitrary arguments and options and does exactly nothing.
//
// Examples:
//
// ```elvish-transcript
// ~> nop
// ~> nop a b c
// ~> nop &k=v
// ```
//
// Etymology: Various languages, in particular NOP in
// [assembly languages](https://en.wikipedia.org/wiki/NOP).
func nop(opts RawOptions, args ...interface{}) {
// Do nothing
}
//elvdoc:fn kind-of
//
// ```elvish
// kind-of $value...
// ```
//
// Output the kinds of `$value`s. Example:
//
// ```elvish-transcript
// ~> kind-of lorem [] [&]
// ▶ string
// ▶ list
// ▶ map
// ```
//
// The terminology and definition of "kind" is subject to change.
func kindOf(fm *Frame, args ...interface{}) error {
out := fm.ValueOutput()
for _, a := range args {
err := out.Put(vals.Kind(a))
if err != nil {
return err
}
}
return nil
}
//elvdoc:fn constantly
//
// ```elvish
// constantly $value...
// ```
//
// Output a function that takes no arguments and outputs `$value`s when called.
// Examples:
//
// ```elvish-transcript
// ~> var f = (constantly lorem ipsum)
// ~> $f
// ▶ lorem
// ▶ ipsum
// ```
//
// The above example is equivalent to simply `var f = { put lorem ipsum }`;
// it is most useful when the argument is **not** a literal value, e.g.
//
// ```elvish-transcript
// ~> var f = (constantly (uname))
// ~> $f
// ▶ Darwin
// ~> $f
// ▶ Darwin
// ```
//
// The above code only calls `uname` once when defining `$f`. In contrast, if
// `$f` is defined as `var f = { put (uname) }`, every time you invoke `$f`,
// `uname` will be called.
//
// Etymology: [Clojure](https://clojuredocs.org/clojure.core/constantly).
func constantly(args ...interface{}) Callable {
// TODO(xiaq): Repr of this function is not right.
return NewGoFn(
"created by constantly",
func(fm *Frame) error {
out := fm.ValueOutput()
for _, v := range args {
err := out.Put(v)
if err != nil {
return err
}
}
return nil
},
)
}
//elvdoc:fn call
//
// ```elvish
// call $fn $args $opts
// ```
//
// Calls `$fn` with `$args` as the arguments and `$opts` as the options. Useful
// for calling a function with dynamic option keys.
//
// Example:
//
// ```elvish-transcript
// ~> var f = {|a &k1=v1 &k2=v2| put $a $k1 $k2 }
// ~> call $f [foo] [&k1=bar]
// ▶ foo
// ▶ bar
// ▶ v2
// ```
func call(fm *Frame, fn Callable, argsVal vals.List, optsVal vals.Map) error {
args := make([]interface{}, 0, argsVal.Len())
for it := argsVal.Iterator(); it.HasElem(); it.Next() {
args = append(args, it.Elem())
}
opts := make(map[string]interface{}, optsVal.Len())
for it := optsVal.Iterator(); it.HasElem(); it.Next() {
k, v := it.Elem()
ks, ok := k.(string)
if !ok {
return errs.BadValue{What: "option key",
Valid: "string", Actual: vals.Kind(k)}
}
opts[ks] = v
}
return fn.Call(fm.Fork("-call"), args, opts)
}
//elvdoc:fn resolve
//
// ```elvish
// resolve $command
// ```
//
// Output what `$command` resolves to in symbolic form. Command resolution is
// described in the [language reference](language.html#ordinary-command).
//
// Example:
//
// ```elvish-transcript
// ~> resolve echo
// ▶ <builtin echo>
// ~> fn f { }
// ~> resolve f
// ▶ <closure 0xc4201c24d0>
// ~> resolve cat
// ▶ <external cat>
// ```
func resolve(fm *Frame, head string) string {
special, fnRef := resolveCmdHeadInternally(fm, head, nil)
switch {
case special != nil:
return "special"
case fnRef != nil:
return "$" + head + FnSuffix
default:
return "(external " + parse.Quote(head) + ")"
}
}
//elvdoc:fn eval
//
// ```elvish
// eval $code &ns=$nil &on-end=$nil
// ```
//
// Evaluates `$code`, which should be a string. The evaluation happens in a
// new, restricted namespace, whose initial set of variables can be specified by
// the `&ns` option. After evaluation completes, the new namespace is passed to
// the callback specified by `&on-end` if it is not nil.
//
// The namespace specified by `&ns` is never modified; it will not be affected
// by the creation or deletion of variables by `$code`. However, the values of
// the variables may be mutated by `$code`.
//
// If the `&ns` option is `$nil` (the default), a temporary namespace built by
// amalgamating the local and upvalue scopes of the caller is used.
//
// If `$code` fails to parse or compile, the parse error or compilation error is
// raised as an exception.
//
// Basic examples that do not modify the namespace or any variable:
//
// ```elvish-transcript
// ~> eval 'put x'
// ▶ x
// ~> var x = foo
// ~> eval 'put $x'
// ▶ foo
// ~> var ns = (ns [&x=bar])
// ~> eval &ns=$ns 'put $x'
// ▶ bar
// ```
//
// Examples that modify existing variables:
//
// ```elvish-transcript
// ~> var y = foo
// ~> eval 'set y = bar'
// ~> put $y
// ▶ bar
// ```
//
// Examples that create new variables and use the callback to access them:
//
// ```elvish-transcript
// ~> eval 'var z = lorem'
// ~> put $z
// compilation error: variable $z not found
// [tty 2], line 1: put $z
// ~> var saved-ns = $nil
// ~> eval &on-end={|ns| set saved-ns = $ns } 'var z = lorem'
// ~> put $saved-ns[z]
// ▶ lorem
// ```
//
// Note that when using variables from an outer scope, only those
// that have been referenced are captured as upvalues (see [closure
// semantics](language.html#closure-semantics)) and thus accessible to `eval`:
//
// ```elvish-transcript
// ~> var a b
// ~> fn f {|code| nop $a; eval $code }
// ~> f 'echo $a'
// $nil
// ~> f 'echo $b'
// Exception: compilation error: variable $b not found
// [eval 2], line 1: echo $b
// Traceback: [... omitted ...]
// ```
type evalOpts struct {
Ns *Ns
OnEnd Callable
}
func (*evalOpts) SetDefaultOptions() {}
func eval(fm *Frame, opts evalOpts, code string) error {
src := parse.Source{Name: fmt.Sprintf("[eval %d]", nextEvalCount()), Code: code}
ns := opts.Ns
if ns == nil {
ns = CombineNs(fm.up, fm.local)
}
// The stacktrace already contains the line that calls "eval", so we pass
// nil as the second argument.
newNs, exc := fm.Eval(src, nil, ns)
if opts.OnEnd != nil {
newFm := fm.Fork("on-end callback of eval")
errCb := opts.OnEnd.Call(newFm, []interface{}{newNs}, NoOpts)
if exc == nil {
return errCb
}
}
return exc
}
// Used to generate unique names for each source passed to eval.
var (
evalCount int
evalCountMutex sync.Mutex
)
func nextEvalCount() int {
evalCountMutex.Lock()
defer evalCountMutex.Unlock()
evalCount
|
{
addBuiltinFns(map[string]interface{}{
"nop": nop,
"kind-of": kindOf,
"constantly": constantly,
// Introspection
"call": call,
"resolve": resolve,
"eval": eval,
"use-mod": useMod,
"deprecate": deprecate,
// Time
"sleep": sleep,
"time": timeCmd,
"-ifaddrs": _ifaddrs,
})
|
identifier_body
|
|
builtin_fn_misc.go
|
//
// ```elvish-transcript
// ~> var f = (constantly lorem ipsum)
// ~> $f
// ▶ lorem
// ▶ ipsum
// ```
//
// The above example is equivalent to simply `var f = { put lorem ipsum }`;
// it is most useful when the argument is **not** a literal value, e.g.
//
// ```elvish-transcript
// ~> var f = (constantly (uname))
// ~> $f
// ▶ Darwin
// ~> $f
// ▶ Darwin
// ```
//
// The above code only calls `uname` once when defining `$f`. In contrast, if
// `$f` is defined as `var f = { put (uname) }`, every time you invoke `$f`,
// `uname` will be called.
//
// Etymology: [Clojure](https://clojuredocs.org/clojure.core/constantly).
func constantly(args ...interface{}) Callable {
// TODO(xiaq): Repr of this function is not right.
return NewGoFn(
"created by constantly",
func(fm *Frame) error {
out := fm.ValueOutput()
for _, v := range args {
err := out.Put(v)
if err != nil {
return err
}
}
return nil
},
)
}
//elvdoc:fn call
//
// ```elvish
// call $fn $args $opts
// ```
//
// Calls `$fn` with `$args` as the arguments and `$opts` as the options. Useful
// for calling a function with dynamic option keys.
//
// Example:
//
// ```elvish-transcript
|
// ~> var f = {|a &k1=v1 &k2=v2| put $a $k1 $k2 }
// ~> call $f [foo] [&k1=bar]
// ▶ foo
// ▶ bar
// ▶ v2
// ```
func call(fm *Frame, fn Callable, argsVal vals.List, optsVal vals.Map) error {
args := make([]interface{}, 0, argsVal.Len())
for it := argsVal.Iterator(); it.HasElem(); it.Next() {
args = append(args, it.Elem())
}
opts := make(map[string]interface{}, optsVal.Len())
for it := optsVal.Iterator(); it.HasElem(); it.Next() {
k, v := it.Elem()
ks, ok := k.(string)
if !ok {
return errs.BadValue{What: "option key",
Valid: "string", Actual: vals.Kind(k)}
}
opts[ks] = v
}
return fn.Call(fm.Fork("-call"), args, opts)
}
//elvdoc:fn resolve
//
// ```elvish
// resolve $command
// ```
//
// Output what `$command` resolves to in symbolic form. Command resolution is
// described in the [language reference](language.html#ordinary-command).
//
// Example:
//
// ```elvish-transcript
// ~> resolve echo
// ▶ <builtin echo>
// ~> fn f { }
// ~> resolve f
// ▶ <closure 0xc4201c24d0>
// ~> resolve cat
// ▶ <external cat>
// ```
func resolve(fm *Frame, head string) string {
special, fnRef := resolveCmdHeadInternally(fm, head, nil)
switch {
case special != nil:
return "special"
case fnRef != nil:
return "$" + head + FnSuffix
default:
return "(external " + parse.Quote(head) + ")"
}
}
//elvdoc:fn eval
//
// ```elvish
// eval $code &ns=$nil &on-end=$nil
// ```
//
// Evaluates `$code`, which should be a string. The evaluation happens in a
// new, restricted namespace, whose initial set of variables can be specified by
// the `&ns` option. After evaluation completes, the new namespace is passed to
// the callback specified by `&on-end` if it is not nil.
//
// The namespace specified by `&ns` is never modified; it will not be affected
// by the creation or deletion of variables by `$code`. However, the values of
// the variables may be mutated by `$code`.
//
// If the `&ns` option is `$nil` (the default), a temporary namespace built by
// amalgamating the local and upvalue scopes of the caller is used.
//
// If `$code` fails to parse or compile, the parse error or compilation error is
// raised as an exception.
//
// Basic examples that do not modify the namespace or any variable:
//
// ```elvish-transcript
// ~> eval 'put x'
// ▶ x
// ~> var x = foo
// ~> eval 'put $x'
// ▶ foo
// ~> var ns = (ns [&x=bar])
// ~> eval &ns=$ns 'put $x'
// ▶ bar
// ```
//
// Examples that modify existing variables:
//
// ```elvish-transcript
// ~> var y = foo
// ~> eval 'set y = bar'
// ~> put $y
// ▶ bar
// ```
//
// Examples that create new variables and use the callback to access them:
//
// ```elvish-transcript
// ~> eval 'var z = lorem'
// ~> put $z
// compilation error: variable $z not found
// [tty 2], line 1: put $z
// ~> var saved-ns = $nil
// ~> eval &on-end={|ns| set saved-ns = $ns } 'var z = lorem'
// ~> put $saved-ns[z]
// ▶ lorem
// ```
//
// Note that when using variables from an outer scope, only those
// that have been referenced are captured as upvalues (see [closure
// semantics](language.html#closure-semantics)) and thus accessible to `eval`:
//
// ```elvish-transcript
// ~> var a b
// ~> fn f {|code| nop $a; eval $code }
// ~> f 'echo $a'
// $nil
// ~> f 'echo $b'
// Exception: compilation error: variable $b not found
// [eval 2], line 1: echo $b
// Traceback: [... omitted ...]
// ```
type evalOpts struct {
Ns *Ns
OnEnd Callable
}
func (*evalOpts) SetDefaultOptions() {}
func eval(fm *Frame, opts evalOpts, code string) error {
src := parse.Source{Name: fmt.Sprintf("[eval %d]", nextEvalCount()), Code: code}
ns := opts.Ns
if ns == nil {
ns = CombineNs(fm.up, fm.local)
}
// The stacktrace already contains the line that calls "eval", so we pass
// nil as the second argument.
newNs, exc := fm.Eval(src, nil, ns)
if opts.OnEnd != nil {
newFm := fm.Fork("on-end callback of eval")
errCb := opts.OnEnd.Call(newFm, []interface{}{newNs}, NoOpts)
if exc == nil {
return errCb
}
}
return exc
}
// Used to generate unique names for each source passed to eval.
var (
evalCount int
evalCountMutex sync.Mutex
)
func nextEvalCount() int {
evalCountMutex.Lock()
defer evalCountMutex.Unlock()
evalCount++
return evalCount
}
//elvdoc:fn use-mod
//
// ```elvish
// use-mod $use-spec
// ```
//
// Imports a module, and outputs the namespace for the module.
//
// Most code should use the [use](language.html#importing-modules-with-use)
// special command instead.
//
// Examples:
//
// ```elvish-transcript
// ~> echo 'var x = value' > a.elv
// ~> put (use-mod ./a)[x]
// ▶ value
// ```
func useMod(fm *Frame, spec string) (*Ns, error) {
return use(fm, spec, nil)
}
func readFileUTF8(fname string) (string, error) {
bytes, err := os.ReadFile(fname)
if err != nil {
return "", err
}
if !utf8.Valid(bytes) {
return "", fmt.Errorf("%s: source is not valid UTF-8", fname)
}
return string(bytes), nil
}
//elvdoc:fn deprecate
//
// ```elvish
// deprecate $msg
// ```
//
// Shows the given deprecation message to stderr. If called from a function
// or module, also shows the call site of the function or import site of the
// module. Does nothing if the combination of the call site and the message has
// been shown before.
//
// ```elvish-transcript
// ~> deprecate msg
// deprecation: msg
// ~> fn f { deprecate msg }
// ~> f
// deprecation: msg
// [tty 19], line 1: f
// ~> exec
// ~> deprecate msg
// deprecation: msg
// ~> fn f { deprecate msg }
// ~> f
// deprecation: msg
// [tty 3], line 1: f
// ~> f # a different call site; shows deprecate message
// deprecation: msg
// [tty 4], line 1: f
// ~> fn g { f }
// ~> g
|
random_line_split
|
|
Executor.py
|
status """
def __init__(self, config=None, **kwargs):
"""
Args:
config (dict): dictionary of config settings. will be merged with
any other provided kwargs. valid keys are:
CCDDRONEPATH (str): path to top-level of CCDDrone installation
CCDDCONFIGFILE (str): path (under CCDDrone path) to store config
CCDDMETADATAFILE (str): path (under CCDDrone path) to store metadata
EXECUTOR_LOGFILE (str): where to put logs from CCDD executables
DATAPATH (str): path to save images
LASTIMGPATH (str): path to save png of last image taken
"""
def getkey(key, default=None):
return kwargs.get(key, (config or {}).get(key, default))
self.logfilename = getkey('EXECUTOR_LOGFILE', 'logs/Executor.log')
self.logfile = None
self.process = None
self.current_exposure = None
self.max_exposures = None
self.exposethread = None
self.lastfile = None
self.lastimgpath = getkey('LASTIMGPATH', 'static/lastimg.png')
self.datapath = getkey("DATAPATH", 'data')
self.ccddpath = getkey('CCDDRONEPATH')
CCDDConfigFile = getkey('CCDDCONFIGFILE','config/Config_GUI.ini')
CCDDMetaFile = getkey('CCDDMETADATAFILE', 'config/Metadata_GUI.json')
self.imagedb_uri = getkey("IMAGEDB_URI", ImageDB.default_uri)
self.imagedb_collection = getkey("IMAGEDB_COLLECTION",
ImageDB.default_collection)
# make sure the datapath exists
if not os.path.isdir(self.datapath):
try:
os.mkdir(self.datapath)
except FileNotFoundError:
raise ValueError(f"DATAPATH '{self.datapath}' does not exist"
"and can't be created")
# make sure ccdd path is real
if not os.path.isdir(self.ccddpath):
raise ValueError(f"CCDDRONEPATH '{self.ccddpath}' doesn't exist")
# make sure it is on PATH
if self.ccddpath not in os.getenv('PATH'):
os.environ['PATH'] = os.pathsep.join([self.ccddpath,
os.getenv('PATH')])
self.outputConfig = path.abspath(path.join(self.ccddpath,
CCDDConfigFile))
self.outputMetadata = path.join(self.ccddpath, CCDDMetaFile)
log.debug("New executor created, config=%s, meta=%s, imagedb=%s/%s",
self.outputConfig, self.outputMetadata,
self.imagedb_uri, self.imagedb_collection)
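# Editorial usage sketch (not part of the original module): an Executor is
# presumably constructed from a plain config dict whose keys match the
# docstring above, e.g.
#   ex = Executor(config={"CCDDRONEPATH": "/opt/CCDDrone",
#                         "DATAPATH": "data",
#                         "EXECUTOR_LOGFILE": "logs/Executor.log"})
#   ex.StartupAndErase()
# The paths shown here are placeholders, not values from the original project.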
def readconfig(self):
""" Get the current config file and return as string """
files = [path.join(self.ccddpath, 'do_not_touch', 'LastSettings.ini'),
path.join(self.ccddpath, 'config', 'Config.ini'),
self.outputConfig]
last = sorted(files, reverse=True,
key=lambda f: path.getmtime(f) if path.isfile(f) else 0)
log.debug("Reading config settings from %s", last[0])
try:
with open(last[0]) as f:
return f.read()
except FileNotFoundError:
return None
def saveconfig(self, newconf, apply=True):
""" Save the config settings in `newconf` to file.
Args:
newconf (str): contents of ini config file as string
apply (bool): if True, call CCDDApplyNewSettings after
"""
with open(self.outputConfig, 'w') as f:
f.write(newconf)
if apply:
self.ApplyNewSettings()
def savemetadata(self, newmeta):
""" Save the metadata to file
Args:
metadata (dict): new metadata
"""
with open(self.outputMetadata, 'w') as f:
json.dump(newmeta, f)
def getstate(self):
state = 'idle'
if self.process:
if self.process.poll() is None:
state = 'running'
elif self.process.returncode != 0:
state = 'error'
if self.current_exposure is not None:
state = 'running'
return state
def getstatus(self):
""" Get out current status as a dict """
status = dict(state=self.getstate(), runningcmd=None,
current_exposure=self.current_exposure,
max_exposures=self.max_exposures,
statustime=str(datetime.now())[:-7],
lastfile=self.lastfile)
if self.process:
status['lastcmd'] = self.process.args[0]
status['lastreturn'] = self.process.poll()
if status['state'] == 'running':
status['runningcmd'] = path.basename(self.process.args[0])
try:
with open(self.logfilename, newline='') as logfile:
ts = datetime.fromtimestamp(path.getmtime(self.logfilename))
status['cmdoutput'] = f"Last output: {str(ts)[:-7]}\n"
status['cmdoutput'] += '#'*80+'\n'
lines = logfile.readlines()
if lines and lines[-1][-1] == '\r':
lines[-1] = lines[-1][:-1]
for line in lines:
|
status['cmdoutput'] += line
except FileNotFoundError:
status['cmdoutput'] = ""
# info for the lastimg to update
status['lastimg'] = self.lastimgpath
try:
status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)
except FileNotFoundError:
status['lastimg_timestamp'] = 0
return status
def endexposureloop(self):
""" Stop an ongoing exposure loop """
self.max_exposures = self.current_exposure
def abort(self, kill=False):
""" abort a currently running process """
log.warning("Received abort request")
self.current_exposure = None
if self.getstate() == 'running':
if kill:
self.process.kill()
else:
self.process.terminate()
with open(self.logfilename, 'a') as f:
print("!!!!!! process killed by user !!!!!!!", file=f)
# methods to run exectuables
def _run(self, args, cwd=None, env=None, logmode='wb'):
""" Run the commands in `args` in a subprocess """
args = tuple(str(arg) for arg in args)
if self.process and self.process.poll() is None:
raise RuntimeError("A process is already running")
if self.logfile:
self.logfile.close()
self.logfile = open(self.logfilename, logmode, buffering=0)
if env is not None:
env = dict(os.environ, **env,
PYTHONPATH=os.pathsep.join(sys.path))
self.process = subprocess.Popen(args, cwd=cwd, stdout=self.logfile,
stderr=subprocess.STDOUT, env=env)
def StartupAndErase(self):
return self._run(['./CCDDStartupAndErase', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def PerformEraseProcedure(self):
return self._run(['./CCDDPerformEraseProcedure', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def ApplyNewSettings(self, newconf=None):
if newconf:
self.saveconfig(newconf, apply=False)
return self._run(['./CCDDApplyNewSettings',
path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def Expose(self, fitsfile, seconds=5):
""" Expose the CCD and read a new image to `fitsfile` """
# make sure the file has good name
if not fitsfile.endswith('.fits'):
fitsfile += '.fits'
tstamp = datetime.now().strftime('_%y%m%d-%H%M')
match = re.match(r'.*(_\d\d\d\d\d\d-\d\d\d\d)\.fits', fitsfile)
if not match:
fitsfile = fitsfile[:-5] + tstamp + '.fits'
elif match.group(1) != tstamp:
fitsfile = fitsfile[:-17] + tstamp + '.fits'
fitsfile = path.join(self.datapath, fitsfile)
self.lastfile = fitsfile
log.info("Starting new exposure, filename=%s",
path.basename(self.lastfile))
args = ['./CCDDExposeDB.py', str(seconds), fitsfile,
self.outputMetadata]
if self.lastimgpath:
args.append(self.lastimgpath)
return self._run(args,
env=dict(IMAGEDB_URI=self.imagedb_uri,
IMAGEDB_COLLECTION=self.imagedb_collection)
)
def _do_expose_loop(self, fitsfile, seconds):
""" private method to perform expose loop. Do not call directly! """
log.debug(f"Starting expose loop with {self.max_exposures} exposures")
while (self.current_exposure is not None and
self.current_exposure < self.max_exposures):
self.current_exposure += 1
self.Expose(fitsfile, seconds)
while self.process and self.process.poll() is None:
sleep(5)
if not self.process or self.process.returncode != 0
|
if not line.endswith('\r'):
|
random_line_split
|
Executor.py
|
status """
def __init__(self, config=None, **kwargs):
"""
Args:
config (dict): dictionary of config settings. will be merged with
any other provided kwargs. valid keys are:
CCDDRONEPATH (str): path to top-level of CCDDrone installation
CCDDCONFIGFILE (str): path (under CCDDrone path) to store config
CCDDMETADATAFILE (str): path (under CCDDrone path) to store metadata
EXECUTOR_LOGFILE (str): where to put logs from CCDD executables
DATAPATH (str): path to save images
LASTIMGPATH (str): path to save png of last image taken
"""
def getkey(key, default=None):
return kwargs.get(key, (config or {}).get(key, default))
self.logfilename = getkey('EXECUTOR_LOGFILE', 'logs/Executor.log')
self.logfile = None
self.process = None
self.current_exposure = None
self.max_exposures = None
self.exposethread = None
self.lastfile = None
self.lastimgpath = getkey('LASTIMGPATH', 'static/lastimg.png')
self.datapath = getkey("DATAPATH", 'data')
self.ccddpath = getkey('CCDDRONEPATH')
CCDDConfigFile = getkey('CCDDCONFIGFILE','config/Config_GUI.ini')
CCDDMetaFile = getkey('CCDDMETADATAFILE', 'config/Metadata_GUI.json')
self.imagedb_uri = getkey("IMAGEDB_URI", ImageDB.default_uri)
self.imagedb_collection = getkey("IMAGEDB_COLLECTION",
ImageDB.default_collection)
# make sure the datapath exists
if not os.path.isdir(self.datapath):
try:
os.mkdir(self.datapath)
except FileNotFoundError:
raise ValueError(f"DATAPATH '{self.datapath}' does not exist"
"and can't be created")
# make sure ccdd path is real
if not os.path.isdir(self.ccddpath):
raise ValueError(f"CCDDRONEPATH '{self.ccddpath}' doesn't exist")
# make sure it is on PATH
if self.ccddpath not in os.getenv('PATH'):
os.environ['PATH'] = os.pathsep.join([self.ccddpath,
os.getenv('PATH')])
self.outputConfig = path.abspath(path.join(self.ccddpath,
CCDDConfigFile))
self.outputMetadata = path.join(self.ccddpath, CCDDMetaFile)
log.debug("New executor created, config=%s, meta=%s, imagedb=%s/%s",
self.outputConfig, self.outputMetadata,
self.imagedb_uri, self.imagedb_collection)
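# --- Hedged illustration (not part of the original class): the lookup order
# implemented by the getkey() closure above -- explicit kwargs win over the
# config dict, which wins over the built-in default. Values are hypothetical.
def make_getkey(config=None, **kwargs):
    config = config or {}
    def getkey(key, default=None):
        return kwargs.get(key, config.get(key, default))
    return getkey

getkey = make_getkey({'DATAPATH': '/data/ccd'}, DATAPATH='/tmp/override')
assert getkey('DATAPATH') == '/tmp/override'      # kwarg beats config
assert getkey('LASTIMGPATH', 'static/lastimg.png') == 'static/lastimg.png'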
def readconfig(self):
""" Get the current config file and return as string """
files = [path.join(self.ccddpath, 'do_not_touch', 'LastSettings.ini'),
path.join(self.ccddpath, 'config', 'Config.ini'),
self.outputConfig]
last = sorted(files, reverse=True,
key=lambda f: path.getmtime(f) if path.isfile(f) else 0)
log.debug("Reading config settings from %s", last[0])
try:
with open(last[0]) as f:
return f.read()
except FileNotFoundError:
return None
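# --- Hedged sketch (illustrative, not the original readconfig): pick the most
# recently modified of several candidate config files, treating missing files
# as infinitely old.
from os import path

def newest_existing(candidates):
    ranked = sorted(candidates, reverse=True,
                    key=lambda f: path.getmtime(f) if path.isfile(f) else 0)
    return ranked[0] if ranked else None

# e.g. newest_existing(['do_not_touch/LastSettings.ini', 'config/Config.ini'])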
def saveconfig(self, newconf, apply=True):
""" Save the config settings in `newconf` to file.
Args:
newconf (str): contents of ini config file as string
apply (bool): if True, call CCDDApplyNewSettings after
"""
with open(self.outputConfig, 'w') as f:
f.write(newconf)
if apply:
self.ApplyNewSettings()
def savemetadata(self, newmeta):
""" Save the metadata to file
Args:
newmeta (dict): new metadata
"""
with open(self.outputMetadata, 'w') as f:
json.dump(newmeta, f)
def getstate(self):
state = 'idle'
if self.process:
if self.process.poll() is None:
state = 'running'
elif self.process.returncode != 0:
state = 'error'
if self.current_exposure is not None:
state = 'running'
return state
def getstatus(self):
""" Get out current status as a dict """
status = dict(state=self.getstate(), runningcmd=None,
current_exposure=self.current_exposure,
max_exposures=self.max_exposures,
statustime=str(datetime.now())[:-7],
lastfile=self.lastfile)
if self.process:
status['lastcmd'] = self.process.args[0]
status['lastreturn'] = self.process.poll()
if status['state'] == 'running':
status['runningcmd'] = path.basename(self.process.args[0])
try:
with open(self.logfilename, newline='') as logfile:
ts = datetime.fromtimestamp(path.getmtime(self.logfilename))
status['cmdoutput'] = f"Last output: {str(ts)[:-7]}\n"
status['cmdoutput'] += '#'*80+'\n'
lines = logfile.readlines()
if lines and lines[-1][-1] == '\r':
lines[-1] = lines[-1][:-1]
for line in lines:
if not line.endswith('\r'):
status['cmdoutput'] += line
except FileNotFoundError:
status['cmdoutput'] = ""
# info for the lastimg to update
status['lastimg'] = self.lastimgpath
try:
status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)
except FileNotFoundError:
status['lastimg_timestamp'] = 0
return status
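# --- Hedged sketch (not the original getstatus): drop in-place progress lines,
# i.e. lines terminated with a bare carriage return, when tailing a log, while
# keeping the very last line even if it still ends in '\r' (it is in progress).
def visible_log_lines(lines):
    if lines and lines[-1].endswith('\r'):
        lines = lines[:-1] + [lines[-1].rstrip('\r')]
    return [line for line in lines if not line.endswith('\r')]

print(visible_log_lines(['step 1\n', 'progress 10%\r', 'progress 90%\r']))
# -> ['step 1\n', 'progress 90%']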
def endexposureloop(self):
""" Stop an ongoing exposure loop """
self.max_exposures = self.current_exposure
def abort(self, kill=False):
""" abort a currently running process """
log.warning("Received abort request")
self.current_exposure = None
if self.getstate() == 'running':
if kill:
self.process.kill()
else:
self.process.terminate()
with open(self.logfilename, 'a') as f:
print("!!!!!! process killed by user !!!!!!!", file=f)
# methods to run executables
def _run(self, args, cwd=None, env=None, logmode='wb'):
""" Run the commands in `args` in a subprocess """
args = tuple(str(arg) for arg in args)
if self.process and self.process.poll() is None:
raise RuntimeError("A process is already running")
if self.logfile:
self.logfile.close()
self.logfile = open(self.logfilename, logmode, buffering=0)
if env is not None:
env = dict(os.environ, **env,
PYTHONPATH=os.pathsep.join(sys.path))
self.process = subprocess.Popen(args, cwd=cwd, stdout=self.logfile,
stderr=subprocess.STDOUT, env=env)
def StartupAndErase(self):
return self._run(['./CCDDStartupAndErase', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def PerformEraseProcedure(self):
return self._run(['./CCDDPerformEraseProcedure', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def ApplyNewSettings(self, newconf=None):
if newconf:
self.saveconfig(newconf, apply=False)
return self._run(['./CCDDApplyNewSettings',
path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def
|
(self, fitsfile, seconds=5):
""" Expose the CCD and read a new image to `fitsfile` """
# make sure the file has a good name
if not fitsfile.endswith('.fits'):
fitsfile += '.fits'
tstamp = datetime.now().strftime('_%y%m%d-%H%M')
match = re.match(r'.*(_\d\d\d\d\d\d-\d\d\d\d)\.fits', fitsfile)
if not match:
fitsfile = fitsfile[:-5] + tstamp + '.fits'
elif match.group(1) != tstamp:
fitsfile = fitsfile[:-17] + tstamp + '.fits'
fitsfile = path.join(self.datapath, fitsfile)
self.lastfile = fitsfile
log.info("Starting new exposure, filename=%s",
path.basename(self.lastfile))
args = ['./CCDDExposeDB.py', str(seconds), fitsfile,
self.outputMetadata]
if self.lastimgpath:
args.append(self.lastimgpath)
return self._run(args,
env=dict(IMAGEDB_URI=self.imagedb_uri,
IMAGEDB_COLLECTION=self.imagedb_collection)
)
def _do_expose_loop(self, fitsfile, seconds):
""" private method to perform expose loop. Do not call directly! """
log.debug(f"Starting expose loop with {self.max_exposures} exposures")
while (self.current_exposure is not None and
self.current_exposure < self.max_exposures):
self.current_exposure += 1
self.Expose(fitsfile, seconds)
while self.process and self.process.poll() is None:
sleep(5)
if not self.process or self.process.returncode != 0
|
Expose
|
identifier_name
|
Executor.py
|
status """
def __init__(self, config=None, **kwargs):
"""
Args:
config (dict): dictionary of config settings. will be merged with
any other provided kwargs. valid keys are:
CCDDRONEPATH (str): path to top-level of CCDDrone installation
CCDDCONFIGFILE (str): path (under CCDDrone path) to store config
CCDDMETADATAFILE (str): path (under CCDDrone path) to store metadata
EXECUTOR_LOGFILE (str): where to put logs from CCDD executables
DATAPATH (str): path to save images
LASTIMGPATH (str): path to save png of last image taken
"""
def getkey(key, default=None):
return kwargs.get(key, config.get(key, default))
self.logfilename = getkey('EXECUTOR_LOGFILE', 'logs/Executor.log')
self.logfile = None
self.process = None
self.current_exposure = None
self.max_exposures = None
self.exposethread = None
self.lastfile = None
self.lastimgpath = getkey('LASTIMGPATH', 'static/lastimg.png')
self.datapath = getkey("DATAPATH", 'data')
self.ccddpath = getkey('CCDDRONEPATH')
CCDDConfigFile = getkey('CCDDCONFIGFILE','config/Config_GUI.ini')
CCDDMetaFile = getkey('CCDDMETADATAFILE', 'config/Metadata_GUI.json')
self.imagedb_uri = getkey("IMAGEDB_URI", ImageDB.default_uri)
self.imagedb_collection = getkey("IMAGEDB_COLLECTION",
ImageDB.default_collection)
# make sure the datapath exists
if not os.path.isdir(self.datapath):
try:
os.mkdir(self.datapath)
except FileNotFoundError:
raise ValueError(f"DATAPATH '{self.datapath}' does not exist"
"and can't be created")
# make sure ccdd path is real
if not os.path.isdir(self.ccddpath):
raise ValueError(f"CCDDRONEPATH '{self.ccddpath}' doesn't exist")
# make sure it is on PATH
if self.ccddpath not in os.getenv('PATH'):
os.environ['PATH'] = os.pathsep.join([self.ccddpath,
os.getenv('PATH')])
self.outputConfig = path.abspath(path.join(self.ccddpath,
CCDDConfigFile))
self.outputMetadata = path.join(self.ccddpath, CCDDMetaFile)
log.debug("New executor created, config=%s, meta=%s, imagedb=%s/%s",
self.outputConfig, self.outputMetadata,
self.imagedb_uri, self.imagedb_collection)
def readconfig(self):
""" Get the current config file and return as string """
files = [path.join(self.ccddpath, 'do_not_touch', 'LastSettings.ini'),
path.join(self.ccddpath, 'config', 'Config.ini'),
self.outputConfig]
last = sorted(files, reverse=True,
key=lambda f: path.getmtime(f) if path.isfile(f) else 0)
log.debug("Reading config settings from %s", last[0])
try:
with open(last[0]) as f:
return f.read()
except FileNotFoundError:
return None
def saveconfig(self, newconf, apply=True):
""" Save the config settings in `newconf` to file.
Args:
newconf (str): contents of ini config file as string
apply (bool): if True, call CCDDApplyNewSettings after
"""
with open(self.outputConfig, 'w') as f:
f.write(newconf)
if apply:
self.ApplyNewSettings()
def savemetadata(self, newmeta):
""" Save the metadata to file
Args:
newmeta (dict): new metadata
"""
with open(self.outputMetadata, 'w') as f:
json.dump(newmeta, f)
def getstate(self):
state = 'idle'
if self.process:
if self.process.poll() is None:
state = 'running'
elif self.process.returncode != 0:
state = 'error'
if self.current_exposure is not None:
state = 'running'
return state
def getstatus(self):
""" Get out current status as a dict """
status = dict(state=self.getstate(), runningcmd=None,
current_exposure=self.current_exposure,
max_exposures=self.max_exposures,
statustime=str(datetime.now())[:-7],
lastfile=self.lastfile)
if self.process:
status['lastcmd'] = self.process.args[0]
status['lastreturn'] = self.process.poll()
if status['state'] == 'running':
status['runningcmd'] = path.basename(self.process.args[0])
try:
with open(self.logfilename, newline='') as logfile:
ts = datetime.fromtimestamp(path.getmtime(self.logfilename))
status['cmdoutput'] = f"Last output: {str(ts)[:-7]}\n"
status['cmdoutput'] += '#'*80+'\n'
lines = logfile.readlines()
if lines and lines[-1][-1] == '\r':
lines[-1] = lines[-1][:-1]
for line in lines:
if not line.endswith('\r'):
status['cmdoutput'] += line
except FileNotFoundError:
status['cmdoutput'] = ""
# info for the lastimg to update
status['lastimg'] = self.lastimgpath
try:
status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)
except FileNotFoundError:
status['lastimg_timestamp'] = 0
return status
def endexposureloop(self):
""" Stop an ongoing exposure loop """
self.max_exposures = self.current_exposure
def abort(self, kill=False):
""" abort a currently running process """
log.warning("Received abort request")
self.current_exposure = None
if self.getstate() == 'running':
|
# methods to run executables
def _run(self, args, cwd=None, env=None, logmode='wb'):
""" Run the commands in `args` in a subprocess """
args = tuple(str(arg) for arg in args)
if self.process and self.process.poll() is None:
raise RuntimeError("A process is already running")
if self.logfile:
self.logfile.close()
self.logfile = open(self.logfilename, logmode, buffering=0)
if env is not None:
env = dict(os.environ, **env,
PYTHONPATH=os.pathsep.join(sys.path))
self.process = subprocess.Popen(args, cwd=cwd, stdout=self.logfile,
stderr=subprocess.STDOUT, env=env)
def StartupAndErase(self):
return self._run(['./CCDDStartupAndErase', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def PerformEraseProcedure(self):
return self._run(['./CCDDPerformEraseProcedure', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def ApplyNewSettings(self, newconf=None):
if newconf:
self.saveconfig(newconf, apply=False)
return self._run(['./CCDDApplyNewSettings',
path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def Expose(self, fitsfile, seconds=5):
""" Expose the CCD and read a new image to `fitsfile` """
# make sure the file has a good name
if not fitsfile.endswith('.fits'):
fitsfile += '.fits'
tstamp = datetime.now().strftime('_%y%m%d-%H%M')
match = re.match(r'.*(_\d\d\d\d\d\d-\d\d\d\d)\.fits', fitsfile)
if not match:
fitsfile = fitsfile[:-5] + tstamp + '.fits'
elif match.group(1) != tstamp:
fitsfile = fitsfile[:-17] + tstamp + '.fits'
fitsfile = path.join(self.datapath, fitsfile)
self.lastfile = fitsfile
log.info("Starting new exposure, filename=%s",
path.basename(self.lastfile))
args = ['./CCDDExposeDB.py', str(seconds), fitsfile,
self.outputMetadata]
if self.lastimgpath:
args.append(self.lastimgpath)
return self._run(args,
env=dict(IMAGEDB_URI=self.imagedb_uri,
IMAGEDB_COLLECTION=self.imagedb_collection)
)
def _do_expose_loop(self, fitsfile, seconds):
""" private method to perform expose loop. Do not call directly! """
log.debug(f"Starting expose loop with {self.max_exposures} exposures")
while (self.current_exposure is not None and
self.current_exposure < self.max_exposures):
self.current_exposure += 1
self.Expose(fitsfile, seconds)
while self.process and self.process.poll() is None:
sleep(5)
if not self.process or self.process.returncode !=
|
if kill:
self.process.kill()
else:
self.process.terminate()
with open(self.logfilename, 'a') as f:
print("!!!!!! process killed by user !!!!!!!", file=f)
|
conditional_block
|
Executor.py
|
self.max_exposures = None
self.exposethread = None
self.lastfile = None
self.lastimgpath = getkey('LASTIMGPATH', 'static/lastimg.png')
self.datapath = getkey("DATAPATH", 'data')
self.ccddpath = getkey('CCDDRONEPATH')
CCDDConfigFile = getkey('CCDDCONFIGFILE','config/Config_GUI.ini')
CCDDMetaFile = getkey('CCDDMETADATAFILE', 'config/Metadata_GUI.json')
self.imagedb_uri = getkey("IMAGEDB_URI", ImageDB.default_uri)
self.imagedb_collection = getkey("IMAGEDB_COLLECTION",
ImageDB.default_collection)
# make sure the datapath exists
if not os.path.isdir(self.datapath):
try:
os.mkdir(self.datapath)
except FileNotFoundError:
raise ValueError(f"DATAPATH '{self.datapath}' does not exist"
"and can't be created")
# make sure ccdd path is real
if not os.path.isdir(self.ccddpath):
raise ValueError(f"CCDDRONEPATH '{self.ccddpath}' doesn't exist")
# make sure it is on PATH
if self.ccddpath not in os.getenv('PATH'):
os.environ['PATH'] = os.pathsep.join([self.ccddpath,
os.getenv('PATH')])
self.outputConfig = path.abspath(path.join(self.ccddpath,
CCDDConfigFile))
self.outputMetadata = path.join(self.ccddpath, CCDDMetaFile)
log.debug("New executor created, config=%s, meta=%s, imagedb=%s/%s",
self.outputConfig, self.outputMetadata,
self.imagedb_uri, self.imagedb_collection)
def readconfig(self):
""" Get the current config file and return as string """
files = [path.join(self.ccddpath, 'do_not_touch', 'LastSettings.ini'),
path.join(self.ccddpath, 'config', 'Config.ini'),
self.outputConfig]
last = sorted(files, reverse=True,
key=lambda f: path.getmtime(f) if path.isfile(f) else 0)
log.debug("Reading config settings from %s", last[0])
try:
with open(last[0]) as f:
return f.read()
except FileNotFoundError:
return None
def saveconfig(self, newconf, apply=True):
""" Save the config settings in `newconf` to file.
Args:
newconf (str): contents of ini config file as string
apply (bool): if True, call CCDDApplyNewSettings after
"""
with open(self.outputConfig, 'w') as f:
f.write(newconf)
if apply:
self.ApplyNewSettings()
def savemetadata(self, newmeta):
""" Save the metadata to file
Args:
newmeta (dict): new metadata
"""
with open(self.outputMetadata, 'w') as f:
json.dump(newmeta, f)
def getstate(self):
state = 'idle'
if self.process:
if self.process.poll() is None:
state = 'running'
elif self.process.returncode != 0:
state = 'error'
if self.current_exposure is not None:
state = 'running'
return state
def getstatus(self):
""" Get out current status as a dict """
status = dict(state=self.getstate(), runningcmd=None,
current_exposure=self.current_exposure,
max_exposures=self.max_exposures,
statustime=str(datetime.now())[:-7],
lastfile=self.lastfile)
if self.process:
status['lastcmd'] = self.process.args[0]
status['lastreturn'] = self.process.poll()
if status['state'] == 'running':
status['runningcmd'] = path.basename(self.process.args[0])
try:
with open(self.logfilename, newline='') as logfile:
ts = datetime.fromtimestamp(path.getmtime(self.logfilename))
status['cmdoutput'] = f"Last output: {str(ts)[:-7]}\n"
status['cmdoutput'] += '#'*80+'\n'
lines = logfile.readlines()
if lines and lines[-1][-1] == '\r':
lines[-1] = lines[-1][:-1]
for line in lines:
if not line.endswith('\r'):
status['cmdoutput'] += line
except FileNotFoundError:
status['cmdoutput'] = ""
# info for the lastimg to update
status['lastimg'] = self.lastimgpath
try:
status['lastimg_timestamp'] = path.getmtime(self.lastimgpath)
except FileNotFoundError:
status['lastimg_timestamp'] = 0
return status
def endexposureloop(self):
""" Stop an ongoing exposure loop """
self.max_exposures = self.current_exposure
def abort(self, kill=False):
""" abort a currently running process """
log.warning("Received abort request")
self.current_exposure = None
if self.getstate() == 'running':
if kill:
self.process.kill()
else:
self.process.terminate()
with open(self.logfilename, 'a') as f:
print("!!!!!! process killed by user !!!!!!!", file=f)
# methods to run executables
def _run(self, args, cwd=None, env=None, logmode='wb'):
""" Run the commands in `args` in a subprocess """
args = tuple(str(arg) for arg in args)
if self.process and self.process.poll() is None:
raise RuntimeError("A process is already running")
if self.logfile:
self.logfile.close()
self.logfile = open(self.logfilename, logmode, buffering=0)
if env is not None:
env = dict(os.environ, **env,
PYTHONPATH=os.pathsep.join(sys.path))
self.process = subprocess.Popen(args, cwd=cwd, stdout=self.logfile,
stderr=subprocess.STDOUT, env=env)
def StartupAndErase(self):
return self._run(['./CCDDStartupAndErase', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def PerformEraseProcedure(self):
return self._run(['./CCDDPerformEraseProcedure', path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def ApplyNewSettings(self, newconf=None):
if newconf:
self.saveconfig(newconf, apply=False)
return self._run(['./CCDDApplyNewSettings',
path.abspath(self.outputConfig)],
cwd=self.ccddpath)
def Expose(self, fitsfile, seconds=5):
""" Expose the CCD and read a new image to `fitsfile` """
# make sure the file has a good name
if not fitsfile.endswith('.fits'):
fitsfile += '.fits'
tstamp = datetime.now().strftime('_%y%m%d-%H%M')
match = re.match(r'.*(_\d\d\d\d\d\d-\d\d\d\d)\.fits', fitsfile)
if not match:
fitsfile = fitsfile[:-5] + tstamp + '.fits'
elif match.group(1) != tstamp:
fitsfile = fitsfile[:-17] + tstamp + '.fits'
fitsfile = path.join(self.datapath, fitsfile)
self.lastfile = fitsfile
log.info("Starting new exposure, filename=%s",
path.basename(self.lastfile))
args = ['./CCDDExposeDB.py', str(seconds), fitsfile,
self.outputMetadata]
if self.lastimgpath:
args.append(self.lastimgpath)
return self._run(args,
env=dict(IMAGEDB_URI=self.imagedb_uri,
IMAGEDB_COLLECTION=self.imagedb_collection)
)
def _do_expose_loop(self, fitsfile, seconds):
""" private method to perform expose loop. Do not call directly! """
log.debug(f"Starting expose loop with {self.max_exposures} exposures")
while (self.current_exposure is not None and
self.current_exposure < self.max_exposures):
self.current_exposure += 1
self.Expose(fitsfile, seconds)
while self.process and self.process.poll() is None:
sleep(5)
if not self.process
|
""" Run CCDD processes and keep track of status """
def __init__(self, config=None, **kwargs):
"""
Args:
config (dict): dictionary of config settings. will be merged with
any other provided kwargs. valid keys are:
CCDDRONEPATH (str): path to top-level of CCDDrone installation
CCDDCONFIGFILE (str): path (under CCDDrone path) to store config
CCDDMETADATAFILE (str): path (under CCDDrone path) to store metadata
EXECUTOR_LOGFILE (str): where to put logs from CCDD executables
DATAPATH (str): path to save images
LASTIMGPATH (str): path to save png of last image taken
"""
def getkey(key, default=None):
return kwargs.get(key, config.get(key, default))
self.logfilename = getkey('EXECUTOR_LOGFILE', 'logs/Executor.log')
self.logfile = None
self.process = None
self.current_exposure = None
|
identifier_body
|
|
UTM.py
|
return N
N_or_S = "N"
if "S" in row:
N_or_S = "S"
return N_or_S
@staticmethod
def zone2use(el_df):
"""
Create a common UTM Zone for this facility from the emission locations.
All emission sources input to Aermod must have UTM coordinates
from a single UTM zone. This function will determine the single
UTM zone (and hemisphere) to use. Parameter is the emissions
location data frame.
"""
# First, check for any utm zones provided by the user in the emission location file
utmzones_df = el_df["utmzone"].loc[el_df["location_type"] == "U"]
if utmzones_df.shape[0] > 0:
# there are some; find the smallest one
utmzones_df['utmzone'] = utmzones_df.apply(lambda row: UTM.getZone(row))
utmzones_df['utmband'] = utmzones_df.apply(lambda row: UTM.getBand(row))
min_utmzu = int(np.nan_to_num(utmzones_df['utmzone']).min(axis=0))
min_utmbu = utmzones_df['utmband'].min()
else:
min_utmzu = 0
min_utmbu = "Z"
# Next, compute utm zones from any user provided longitudes and find smallest
lon_df = el_df[["lon"]].loc[el_df["location_type"] == "L"]
if lon_df.shape[0] > 0:
lon_df["z"] = ((lon_df["lon"]+180)/6 + 1).astype(int)
min_utmzl = int(np.nan_to_num(lon_df["z"]).min(axis=0))
else:
min_utmzl = 0
lat_df = el_df[["lat"]].loc[el_df["location_type"] == "L"]
if lat_df.shape[0] > 0 and lat_df["lat"].min() < 0:
min_utmbl = "S"
else:
min_utmbl = "N"
if min_utmzu == 0:
utmzone = min_utmzl
else:
if min_utmzl == 0:
utmzone = min_utmzu
else:
utmzone = min(min_utmzu, min_utmzl)
hemi = min(min_utmbu, min_utmbl)
if utmzone == 0:
emessage = "Error! UTM zone is 0"
Logger.logMessage(emessage)
raise Exception(emessage)
if hemi == "Z":
emessage = "Error! Hemisphere of UTM zone could not be determined."
Logger.logMessage(emessage)
raise Exception(emessage)
return utmzone, hemi
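# --- Hedged sketch (illustrative only): the core of zone2use above is the
# standard UTM zone formula -- 6-degree bands counted east from 180W -- plus a
# hemisphere flag from the sign of the latitude.
def utm_zone_from_lonlat(lon, lat):
    zone = int((lon + 180) / 6 + 1)   # 1..60
    hemi = 'S' if lat < 0 else 'N'
    return zone, hemi

assert utm_zone_from_lonlat(-87.6, 41.9) == (16, 'N')   # Chicago
assert utm_zone_from_lonlat(151.2, -33.9) == (56, 'S')  # Sydney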
@staticmethod
def utm2ll(utmn,utme,zone):
zonenum = UTM.getZone(zone)
zonehemi = UTM.getBand(zone)
zonetxt = UTM.zonetxt(zonenum)
if zonehemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer(epsg, 'epsg:4326')
lon,lat = transformer.transform(utme, utmn)
return lat, lon
@staticmethod
def ll2utm(lat,lon):
zone = int((lon + 180)/6 + 1)
zonetxt = UTM.zonetxt(zone)
if lat < 0:
|
else:
hemi = "N"
if hemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer('epsg:4326', epsg)
# Use the cached transformer to perform the transformation more quickly!
# see https://pyproj4.github.io/pyproj/stable/advanced_examples.html#optimize-transformations
utme, utmn = transformer.transform(lon, lat)
utme = round(utme)
utmn = round(utmn)
return utmn, utme, zone, hemi, epsg
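# --- Hedged sketch (assumes pyproj is installed; not the original helpers):
# how the zone/hemisphere pair maps to a WGS84/UTM EPSG code (326xx north,
# 327xx south) and how a pyproj Transformer converts lon/lat to UTM.
from pyproj import Transformer

def utm_epsg(zone, hemi):
    return f"epsg:{326 if hemi == 'N' else 327}{zone:02d}"

def to_utm(lat, lon):
    zone = int((lon + 180) / 6 + 1)
    hemi = 'S' if lat < 0 else 'N'
    # always_xy=True keeps the (lon, lat) / (easting, northing) argument order
    t = Transformer.from_crs('epsg:4326', utm_epsg(zone, hemi), always_xy=True)
    easting, northing = t.transform(lon, lat)
    return round(northing), round(easting), zone, hemi

# e.g. to_utm(41.9, -87.6) -> northing/easting in EPSG:32616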
@staticmethod
def ll2utm_alt(lat,lon,zoneUsed, hemiUsed):
realN, realE, realZone, realHemi, realepsg = UTM.ll2utm(lat,lon)
if zoneUsed == realZone:
return realN, realE
else:
if realZone > zoneUsed:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) - 1)
else:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) + 1)
if zoneUsed == 60 and realHemi == "N":
epsgUsed = "epsg:32660"
if zoneUsed == 60 and realHemi == "S":
epsgUsed = "epsg:32760"
transformer = UTM.getTransformer(realepsg, epsgUsed)
utme, utmn = transformer.transform(realE, realN)
return round(utmn), round(utme)
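# --- Hedged sketch (pyproj assumed; not the original ll2utm_alt): when every
# source must be reported in one facility-wide zone, a point can be projected
# directly into that zone's EPSG code even if it naturally falls in a
# neighbouring zone.
from pyproj import Transformer

def ll_to_fixed_zone(lat, lon, zone, hemi):
    epsg = f"epsg:{326 if hemi == 'N' else 327}{zone:02d}"
    t = Transformer.from_crs('epsg:4326', epsg, always_xy=True)
    easting, northing = t.transform(lon, lat)
    return round(northing), round(easting)

# e.g. ll_to_fixed_zone(41.9, -83.5, 16, 'N') projects a zone-17 point into zone 16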
@staticmethod
def center(sourcelocs, facutmznum, fachemi):
"""
This method computes the center of a facility from the emission
location UTM coordinates. The overall facility UTM zone and hemisphere are needed.
"""
# Fill up lists of x and y coordinates of all source vertices
vertx_l = []
verty_l = []
for index, row in sourcelocs.iterrows():
vertx_l.append(row["utme"])
verty_l.append(row["utmn"])
# If this is an area source, add the other 3 corners to vertex list
if row["source_type"].upper() == "A":
angle_rad = m.radians(row["angle"])
utme1 = row["utme"] + row["lengthx"] * m.cos(angle_rad)
utmn1 = row["utmn"] - row["lengthx"] * m.sin(angle_rad)
utme2 = (row["utme"] + (row["lengthx"] * m.cos(angle_rad)) +
(row["lengthy"] * m.sin(angle_rad)))
utmn2 = (row["utmn"] + (row["lengthy"] * m.cos(angle_rad)) -
(row["lengthx"] * m.sin(angle_rad)))
utme3 = row["utme"] + row["lengthy"] * m.sin(angle_rad)
utmn3 = row["utmn"] + row["lengthy"] * m.cos(angle_rad)
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If this is a volume source, then add the vertices of it
if row["source_type"].upper() == "V":
utme1 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn1 = row["utmn"] - row["lengthy"] * m.sqrt(2)/2
utme2 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn2 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
utme3 = row["utme"] - row["lengthx"] * m.sqrt(2)/2
utmn3 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If line or buoyant line source, add second vertex
if row["source_type"].upper() == "N" or row["source_type"].upper() == "B":
vertx_l.append(row["utme_x2"])
verty_l.append(row["utmn_y2"])
vertx_a = np.array(vertx_l)
verty_a = np.array(verty_l)
# Combine the x and y vertices lists into list of tuples and then get a
# unique list of vertices of the form (x, y) where x=utme and y=utmn
sourceverts = list(zip(vertx_l, verty_l))
unique_verts = list(set(sourceverts))
# Find the two vertices that are the farthest apart
# Also find the corners of the modeling domain
max_dist = 0
max_x
|
hemi = "S"
|
conditional_block
|
UTM.py
|
_utmzu
else:
utmzone = min(min_utmzu, min_utmzl)
hemi = min(min_utmbu, min_utmbl)
if utmzone == 0:
emessage = "Error! UTM zone is 0"
Logger.logMessage(emessage)
raise Exception(emessage)
if hemi == "Z":
emessage = "Error! Hemisphere of UTM zone could not be determined."
Logger.logMessage(emessage)
raise Exception(emessage)
return utmzone, hemi
@staticmethod
def utm2ll(utmn,utme,zone):
zonenum = UTM.getZone(zone)
zonehemi = UTM.getBand(zone)
zonetxt = UTM.zonetxt(zonenum)
if zonehemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer(epsg, 'epsg:4326')
lon,lat = transformer.transform(utme, utmn)
return lat, lon
@staticmethod
def ll2utm(lat,lon):
zone = int((lon + 180)/6 + 1)
zonetxt = UTM.zonetxt(zone)
if lat < 0:
hemi = "S"
else:
hemi = "N"
if hemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer('epsg:4326', epsg)
# Use the cached transformer to perform the transformation more quickly!
# see https://pyproj4.github.io/pyproj/stable/advanced_examples.html#optimize-transformations
utme, utmn = transformer.transform(lon, lat)
utme = round(utme)
utmn = round(utmn)
return utmn, utme, zone, hemi, epsg
@staticmethod
def ll2utm_alt(lat,lon,zoneUsed, hemiUsed):
realN, realE, realZone, realHemi, realepsg = UTM.ll2utm(lat,lon)
if zoneUsed == realZone:
return realN, realE
else:
if realZone > zoneUsed:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) - 1)
else:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) + 1)
if zoneUsed == 60 and realHemi == "N":
epsgUsed = "epsg:32660"
if zoneUsed == 60 and realHemi == "S":
epsgUsed = "epsg:32760"
transformer = UTM.getTransformer(realepsg, epsgUsed)
utme, utmn = transformer.transform(realE, realN)
return round(utmn), round(utme)
@staticmethod
def center(sourcelocs, facutmznum, fachemi):
"""
This method computes the center of a facility from the emission
location UTM coordinates. The overall facility UTM zone and hemisphere are needed.
"""
# Fill up lists of x and y coordinates of all source vertices
vertx_l = []
verty_l = []
for index, row in sourcelocs.iterrows():
vertx_l.append(row["utme"])
verty_l.append(row["utmn"])
# If this is an area source, add the other 3 corners to vertex list
if row["source_type"].upper() == "A":
angle_rad = m.radians(row["angle"])
utme1 = row["utme"] + row["lengthx"] * m.cos(angle_rad)
utmn1 = row["utmn"] - row["lengthx"] * m.sin(angle_rad)
utme2 = (row["utme"] + (row["lengthx"] * m.cos(angle_rad)) +
(row["lengthy"] * m.sin(angle_rad)))
utmn2 = (row["utmn"] + (row["lengthy"] * m.cos(angle_rad)) -
(row["lengthx"] * m.sin(angle_rad)))
utme3 = row["utme"] + row["lengthy"] * m.sin(angle_rad)
utmn3 = row["utmn"] + row["lengthy"] * m.cos(angle_rad)
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If this is a volume source, then add the vertices of it
if row["source_type"].upper() == "V":
utme1 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn1 = row["utmn"] - row["lengthy"] * m.sqrt(2)/2
utme2 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn2 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
utme3 = row["utme"] - row["lengthx"] * m.sqrt(2)/2
utmn3 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If line or buoyant line source, add second vertex
if row["source_type"].upper() == "N" or row["source_type"].upper() == "B":
vertx_l.append(row["utme_x2"])
verty_l.append(row["utmn_y2"])
vertx_a = np.array(vertx_l)
verty_a = np.array(verty_l)
# Combine the x and y vertices lists into list of tuples and then get a
# unique list of vertices of the form (x, y) where x=utme and y=utmn
sourceverts = list(zip(vertx_l, verty_l))
unique_verts = list(set(sourceverts))
# Find the two vertices that are the farthest apart
# Also find the corners of the modeling domain
max_dist = 0
max_x = min_x = vertx_a[0]
max_y = min_y = verty_a[0]
if len(unique_verts) > 1: #more than one source coordinate
# initialize
xmax1 = unique_verts[0][0]
ymax1 = unique_verts[0][1]
xmax2 = unique_verts[1][0]
ymax2 = unique_verts[1][1]
for i in range(0, len(unique_verts)-1):
# corners
max_x = max(max_x, unique_verts[i][0])
max_y = max(max_y, unique_verts[i][1])
min_x = min(min_x, unique_verts[i][0])
min_y = min(min_y, unique_verts[i][1])
# find farthest apart
j = i + 1
for k in range(j, len(unique_verts)):
dist = m.sqrt((unique_verts[i][0] - unique_verts[k][0])**2 +
(unique_verts[i][1] - unique_verts[k][1])**2)
if dist > max_dist:
max_dist = dist
xmax1 = unique_verts[i][0]
ymax1 = unique_verts[i][1]
xmax2 = unique_verts[k][0]
ymax2 = unique_verts[k][1]
# Calculate the center of the facility in utm coordinates
cenx = round((xmax1 + xmax2) / 2)
ceny = round((ymax1 + ymax2) / 2)
else: #single source coordinate
# Calculate the center of the facility in utm coordinates
cenx = round(max_x)
ceny = round(max_y)
# Compute the lat/lon of the center
utmz = str(facutmznum) + fachemi
cenlat, cenlon = UTM.utm2ll(ceny, cenx, utmz)
return cenx, ceny, cenlon, cenlat, max_dist, vertx_a, verty_a
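# --- Hedged sketch (not the original center()): the geometric core of the
# method above -- find the two vertices that are farthest apart and take the
# midpoint of that pair as the facility centre.
import math

def centre_of_vertices(verts):
    """verts: list of (easting, northing) tuples with at least one entry."""
    uniq = list(set(verts))
    if len(uniq) == 1:
        (x, y), = uniq
        return round(x), round(y), 0.0
    best = (0.0, uniq[0], uniq[1])
    for i, a in enumerate(uniq):
        for b in uniq[i + 1:]:
            d = math.dist(a, b)
            if d > best[0]:
                best = (d, a, b)
    d, (x1, y1), (x2, y2) = best
    return round((x1 + x2) / 2), round((y1 + y2) / 2), d

print(centre_of_vertices([(0, 0), (100, 0), (40, 30)]))  # -> (50, 0, 100.0)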
# This method returns the correct transformer to use. It comes either from
# the cache (if it's been requested previously) or from the Transformer instantiation
# method. Note that the transformers cache is keyed by the concatenation of the two
# projection epsg values.
@staticmethod
def
|
getTransformer
|
identifier_name
|
|
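# --- Hedged sketch of the caching described in the comment a few lines above
# (illustrative; assumes pyproj): reuse Transformer objects keyed by the
# source/target EPSG pair, since building one is far more expensive than
# calling transform().
from functools import lru_cache
from pyproj import Transformer

@lru_cache(maxsize=None)
def get_transformer(epsg_from, epsg_to):
    # lru_cache keys on the (epsg_from, epsg_to) pair, which plays the same
    # role as the concatenated-EPSG cache key described above.
    return Transformer.from_crs(epsg_from, epsg_to, always_xy=True)

# get_transformer('epsg:4326', 'epsg:32616') is built once and then reused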
UTM.py
|
@staticmethod
def getBand(row):
# returns the hemisphere (N or S) portion of a zone string; if none return N
N_or_S = "N"
if "S" in row:
N_or_S = "S"
return N_or_S
@staticmethod
def zone2use(el_df):
"""
Create a common UTM Zone for this facility from the emission locations.
All emission sources input to Aermod must have UTM coordinates
from a single UTM zone. This function will determine the single
UTM zone (and hemisphere) to use. Parameter is the emissions
location data frame.
"""
# First, check for any utm zones provided by the user in the emission location file
utmzones_df = el_df["utmzone"].loc[el_df["location_type"] == "U"]
if utmzones_df.shape[0] > 0:
# there are some; find the smallest one
utmzones_df['utmzone'] = utmzones_df.apply(lambda row: UTM.getZone(row))
utmzones_df['utmband'] = utmzones_df.apply(lambda row: UTM.getBand(row))
min_utmzu = int(np.nan_to_num(utmzones_df['utmzone']).min(axis=0))
min_utmbu = utmzones_df['utmband'].min()
else:
min_utmzu = 0
min_utmbu = "Z"
# Next, compute utm zones from any user provided longitudes and find smallest
lon_df = el_df[["lon"]].loc[el_df["location_type"] == "L"]
if lon_df.shape[0] > 0:
lon_df["z"] = ((lon_df["lon"]+180)/6 + 1).astype(int)
min_utmzl = int(np.nan_to_num(lon_df["z"]).min(axis=0))
else:
min_utmzl = 0
lat_df = el_df[["lat"]].loc[el_df["location_type"] == "L"]
if lat_df.shape[0] > 0 and lat_df["lat"].min() < 0:
min_utmbl = "S"
else:
min_utmbl = "N"
if min_utmzu == 0:
utmzone = min_utmzl
else:
if min_utmzl == 0:
utmzone = min_utmzu
else:
utmzone = min(min_utmzu, min_utmzl)
hemi = min(min_utmbu, min_utmbl)
if utmzone == 0:
emessage = "Error! UTM zone is 0"
Logger.logMessage(emessage)
raise Exception(emessage)
if hemi == "Z":
emessage = "Error! Hemisphere of UTM zone could not be determined."
Logger.logMessage(emessage)
raise Exception(emessage)
return utmzone, hemi
@staticmethod
def utm2ll(utmn,utme,zone):
zonenum = UTM.getZone(zone)
zonehemi = UTM.getBand(zone)
zonetxt = UTM.zonetxt(zonenum)
if zonehemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer(epsg, 'epsg:4326')
lon,lat = transformer.transform(utme, utmn)
return lat, lon
@staticmethod
def ll2utm(lat,lon):
zone = int((lon + 180)/6 + 1)
zonetxt = UTM.zonetxt(zone)
if lat < 0:
hemi = "S"
else:
hemi = "N"
if hemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer('epsg:4326', epsg)
# Use the cached transformer to perform the transformation more quickly!
# see https://pyproj4.github.io/pyproj/stable/advanced_examples.html#optimize-transformations
utme, utmn = transformer.transform(lon, lat)
utme = round(utme)
utmn = round(utmn)
return utmn, utme, zone, hemi, epsg
@staticmethod
def ll2utm_alt(lat,lon,zoneUsed, hemiUsed):
realN, realE, realZone, realHemi, realepsg = UTM.ll2utm(lat,lon)
if zoneUsed == realZone:
return realN, realE
else:
if realZone > zoneUsed:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) - 1)
else:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) + 1)
if zoneUsed == 60 and realHemi == "N":
epsgUsed = "epsg:32660"
if zoneUsed == 60 and realHemi == "S":
epsgUsed = "epsg:32760"
transformer = UTM.getTransformer(realepsg, epsgUsed)
utme, utmn = transformer.transform(realE, realN)
return round(utmn), round(utme)
@staticmethod
def center(sourcelocs, facutmznum, fachemi):
"""
This method computes the center of a facility from the emission
location UTM coordinates. The overall facility UTM zone and hemisphere are needed.
"""
# Fill up lists of x and y coordinates of all source vertices
vertx_l = []
verty_l = []
for index, row in sourcelocs.iterrows():
vertx_l.append(row["utme"])
verty_l.append(row["utmn"])
# If this is an area source, add the other 3 corners to vertex list
if row["source_type"].upper() == "A":
angle_rad = m.radians(row["angle"])
utme1 = row["utme"] + row["lengthx"] * m.cos(angle_rad)
utmn1 = row["utmn"] - row["lengthx"] * m.sin(angle_rad)
utme2 = (row["utme"] + (row["lengthx"] * m.cos(angle_rad)) +
(row["lengthy"] * m.sin(angle_rad)))
utmn2 = (row["utmn"] + (row["lengthy"] * m.cos(angle_rad)) -
(row["lengthx"] * m.sin(angle_rad)))
utme3 = row["utme"] + row["lengthy"] * m.sin(angle_rad)
utmn3 = row["utmn"] + row["lengthy"] * m.cos(angle_rad)
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If this is a volume source, then add the vertices of it
if row["source_type"].upper() == "V":
utme1 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn1 = row["utmn"] - row["lengthy"] * m.sqrt(2)/2
utme2 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn2 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
utme3 = row["utme"] - row["lengthx"] * m.sqrt(2)/2
utmn3 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If line or buoyant line source, add second vertex
if row["source_type"].upper() == "N" or row["source_type"].upper() == "B":
vertx_l.append(row["utme_x2"])
verty_l.append(row["utmn_y2"])
vertx_a = np.array(vertx_l)
verty_a = np.array(verty_l)
# Combine the x and y vertices lists into list of tuples and then get a
# unique list of vertices of the form (x, y) where x
|
hemilist = ['N', 'S']
if any(elem in zonestr for elem in hemilist):
return zonestr[:-1]
else:
return zonestr
|
identifier_body
|
|
UTM.py
|
none return N
N_or_S = "N"
if "S" in row:
N_or_S = "S"
return N_or_S
@staticmethod
def zone2use(el_df):
"""
Create a common UTM Zone for this facility from the emission locations.
All emission sources input to Aermod must have UTM coordinates
from a single UTM zone. This function will determine the single
UTM zone (and hemisphere) to use. Parameter is the emissions
location data frame.
"""
# First, check for any utm zones provided by the user in the emission location file
utmzones_df = el_df["utmzone"].loc[el_df["location_type"] == "U"]
if utmzones_df.shape[0] > 0:
# there are some; find the smallest one
utmzones_df['utmzone'] = utmzones_df.apply(lambda row: UTM.getZone(row))
utmzones_df['utmband'] = utmzones_df.apply(lambda row: UTM.getBand(row))
min_utmzu = int(np.nan_to_num(utmzones_df['utmzone']).min(axis=0))
min_utmbu = utmzones_df['utmband'].min()
else:
min_utmzu = 0
min_utmbu = "Z"
# Next, compute utm zones from any user provided longitudes and find smallest
lon_df = el_df[["lon"]].loc[el_df["location_type"] == "L"]
if lon_df.shape[0] > 0:
lon_df["z"] = ((lon_df["lon"]+180)/6 + 1).astype(int)
min_utmzl = int(np.nan_to_num(lon_df["z"]).min(axis=0))
else:
min_utmzl = 0
|
else:
min_utmbl = "N"
if min_utmzu == 0:
utmzone = min_utmzl
else:
if min_utmzl == 0:
utmzone = min_utmzu
else:
utmzone = min(min_utmzu, min_utmzl)
hemi = min(min_utmbu, min_utmbl)
if utmzone == 0:
emessage = "Error! UTM zone is 0"
Logger.logMessage(emessage)
raise Exception(emessage)
if hemi == "Z":
emessage = "Error! Hemisphere of UTM zone could not be determined."
Logger.logMessage(emessage)
raise Exception(emessage)
return utmzone, hemi
@staticmethod
def utm2ll(utmn,utme,zone):
zonenum = UTM.getZone(zone)
zonehemi = UTM.getBand(zone)
zonetxt = UTM.zonetxt(zonenum)
if zonehemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer(epsg, 'epsg:4326')
lon,lat = transformer.transform(utme, utmn)
return lat, lon
@staticmethod
def ll2utm(lat,lon):
zone = int((lon + 180)/6 + 1)
zonetxt = UTM.zonetxt(zone)
if lat < 0:
hemi = "S"
else:
hemi = "N"
if hemi == "N":
epsg = 'epsg:326'+str(zonetxt)
else:
epsg = 'epsg:327'+str(zonetxt)
transformer = UTM.getTransformer('epsg:4326', epsg)
# Use the cached transformer to perform the transformation more quickly!
# see https://pyproj4.github.io/pyproj/stable/advanced_examples.html#optimize-transformations
utme, utmn = transformer.transform(lon, lat)
utme = round(utme)
utmn = round(utmn)
return utmn, utme, zone, hemi, epsg
@staticmethod
def ll2utm_alt(lat,lon,zoneUsed, hemiUsed):
realN, realE, realZone, realHemi, realepsg = UTM.ll2utm(lat,lon)
if zoneUsed == realZone:
return realN, realE
else:
if realZone > zoneUsed:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) - 1)
else:
epsgUsed = 'epsg:' + str(int(realepsg.split(sep=':')[1]) + 1)
if zoneUsed == 60 and realHemi == "N":
epsgUsed = "epsg:32660"
if zoneUsed == 60 and realHemi == "S":
epsgUsed = "epsg:32760"
transformer = UTM.getTransformer(realepsg, epsgUsed)
utme, utmn = transformer.transform(realE, realN)
return round(utmn), round(utme)
@staticmethod
def center(sourcelocs, facutmznum, fachemi):
"""
This method computes the center of a facility from the emission
location UTM coordinates. The overall facility UTM zone and hemisphere are needed.
"""
# Fill up lists of x and y coordinates of all source vertices
vertx_l = []
verty_l = []
for index, row in sourcelocs.iterrows():
vertx_l.append(row["utme"])
verty_l.append(row["utmn"])
# If this is an area source, add the other 3 corners to vertex list
if row["source_type"].upper() == "A":
angle_rad = m.radians(row["angle"])
utme1 = row["utme"] + row["lengthx"] * m.cos(angle_rad)
utmn1 = row["utmn"] - row["lengthx"] * m.sin(angle_rad)
utme2 = (row["utme"] + (row["lengthx"] * m.cos(angle_rad)) +
(row["lengthy"] * m.sin(angle_rad)))
utmn2 = (row["utmn"] + (row["lengthy"] * m.cos(angle_rad)) -
(row["lengthx"] * m.sin(angle_rad)))
utme3 = row["utme"] + row["lengthy"] * m.sin(angle_rad)
utmn3 = row["utmn"] + row["lengthy"] * m.cos(angle_rad)
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If this is a volume source, then add the vertices of it
if row["source_type"].upper() == "V":
utme1 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn1 = row["utmn"] - row["lengthy"] * m.sqrt(2)/2
utme2 = row["utme"] + row["lengthx"] * m.sqrt(2)/2
utmn2 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
utme3 = row["utme"] - row["lengthx"] * m.sqrt(2)/2
utmn3 = row["utmn"] + row["lengthy"] * m.sqrt(2)/2
vertx_l.append(utme1)
vertx_l.append(utme2)
vertx_l.append(utme3)
verty_l.append(utmn1)
verty_l.append(utmn2)
verty_l.append(utmn3)
# If line or buoyant line source, add second vertex
if row["source_type"].upper() == "N" or row["source_type"].upper() == "B":
vertx_l.append(row["utme_x2"])
verty_l.append(row["utmn_y2"])
vertx_a = np.array(vertx_l)
verty_a = np.array(verty_l)
# Combine the x and y vertices lists into list of tuples and then get a
# unique list of vertices of the form (x, y) where x=utme and y=utmn
sourceverts = list(zip(vertx_l, verty_l))
unique_verts = list(set(sourceverts))
# Find the two vertices that are the farthest apart
# Also find the corners of the modeling domain
max_dist = 0
max_x
|
lat_df = el_df[["lat"]].loc[el_df["location_type"] == "L"]
if lat_df.shape[0] > 0 and lat_df["lat"].min() < 0:
min_utmbl = "S"
|
random_line_split
|
trace.go
|
andb/api/event"
databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
tracev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/trace/v1"
apischema "github.com/apache/skywalking-banyandb/api/schema"
"github.com/apache/skywalking-banyandb/banyand/discovery"
"github.com/apache/skywalking-banyandb/banyand/queue"
"github.com/apache/skywalking-banyandb/pkg/bus"
"github.com/apache/skywalking-banyandb/pkg/convert"
"github.com/apache/skywalking-banyandb/pkg/logger"
"github.com/apache/skywalking-banyandb/pkg/partition"
"github.com/apache/skywalking-banyandb/pkg/query/logical"
"github.com/apache/skywalking-banyandb/pkg/run"
)
var (
ErrSeriesEvents = errors.New("no seriesEvent")
ErrShardEvents = errors.New("no shardEvent")
ErrInvalidSeriesID = errors.New("invalid seriesID")
ErrServerCert = errors.New("invalid server cert file")
ErrServerKey = errors.New("invalid server key file")
ErrNoAddr = errors.New("no address")
ErrQueryMsg = errors.New("invalid query message")
defaultRecvSize = 1024 * 1024 * 10
)
type Server struct {
addr string
maxRecvMsgSize int
tls bool
certFile string
keyFile string
log *logger.Logger
ser *grpclib.Server
pipeline queue.Queue
repo discovery.ServiceRepo
shardInfo *shardInfo
seriesInfo *seriesInfo
tracev1.UnimplementedTraceServiceServer
creds credentials.TransportCredentials
}
type shardInfo struct {
log *logger.Logger
shardEventsMap map[string]uint32
sync.RWMutex
}
|
return
}
s.setShardNum(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Uint64("shardID", e.Shard.Id).
Msg("received a shard e")
return
}
func (s *shardInfo) setShardNum(eventVal *databasev1.ShardEvent) {
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
idx := eventVal.Shard.Series.GetName() + "-" + eventVal.Shard.Series.GetGroup()
if eventVal.Action == databasev1.Action_ACTION_PUT {
s.shardEventsMap[idx] = eventVal.Shard.Total
} else if eventVal.Action == databasev1.Action_ACTION_DELETE {
delete(s.shardEventsMap, idx)
}
}
func (s *shardInfo) shardNum(idx string) uint32 {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.shardEventsMap[idx]
}
type seriesInfo struct {
log *logger.Logger
seriesEventsMap map[string][]int
sync.RWMutex
}
func (s *seriesInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.SeriesEvent)
if !ok {
s.log.Warn().Msg("invalid e data type")
return
}
s.updateFieldIndexCompositeSeriesID(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Str("name", e.Series.Name).
Str("group", e.Series.Group).
Msg("received a shard e")
return
}
func (s *seriesInfo) updateFieldIndexCompositeSeriesID(seriesEventVal *databasev1.SeriesEvent) {
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
str := seriesEventVal.Series.GetName() + "-" + seriesEventVal.Series.GetGroup()
if seriesEventVal.Action == databasev1.Action_ACTION_PUT {
ana := logical.DefaultAnalyzer()
metadata := common.Metadata{
KindVersion: apischema.SeriesKindVersion,
Spec: seriesEventVal.Series,
}
schema, err := ana.BuildTraceSchema(context.TODO(), metadata)
if err != nil {
s.log.Err(err).Msg("build trace schema")
return
}
fieldRefs, errField := schema.CreateRef(seriesEventVal.FieldNamesCompositeSeriesId...)
if errField != nil {
s.log.Err(errField).Msg("create series ref")
return
}
refIdx := make([]int, len(fieldRefs))
for i, ref := range fieldRefs {
refIdx[i] = ref.Spec.Idx
}
s.seriesEventsMap[str] = refIdx
} else if seriesEventVal.Action == databasev1.Action_ACTION_DELETE {
delete(s.seriesEventsMap, str)
}
}
func (s *seriesInfo) FieldIndexCompositeSeriesID(seriesMeta string) []int {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.seriesEventsMap[seriesMeta]
}
func (s *Server) PreRun() error {
s.log = logger.GetLogger("liaison-grpc")
s.shardInfo.log = s.log
s.seriesInfo.log = s.log
err := s.repo.Subscribe(event.TopicShardEvent, s.shardInfo)
if err != nil {
return err
}
return s.repo.Subscribe(event.TopicSeriesEvent, s.seriesInfo)
}
func NewServer(_ context.Context, pipeline queue.Queue, repo discovery.ServiceRepo) *Server {
return &Server{
pipeline: pipeline,
repo: repo,
shardInfo: &shardInfo{shardEventsMap: make(map[string]uint32)},
seriesInfo: &seriesInfo{seriesEventsMap: make(map[string][]int)},
}
}
func (s *Server) Name() string {
return "grpc"
}
func (s *Server) FlagSet() *run.FlagSet {
fs := run.NewFlagSet("grpc")
fs.IntVarP(&s.maxRecvMsgSize, "max-recv-msg-size", "", defaultRecvSize, "The size of max receiving message")
fs.BoolVarP(&s.tls, "tls", "", true, "Connection uses TLS if true, else plain TCP")
fs.StringVarP(&s.certFile, "cert-file", "", "server_cert.pem", "The TLS cert file")
fs.StringVarP(&s.keyFile, "key-file", "", "server_key.pem", "The TLS key file")
fs.StringVarP(&s.addr, "addr", "", ":17912", "The address of banyand listens")
return fs
}
func (s *Server) Validate() error {
if s.addr == "" {
return ErrNoAddr
}
if !s.tls {
return nil
}
if s.certFile == "" {
return ErrServerCert
}
if s.keyFile == "" {
return ErrServerKey
}
creds, errTLS := credentials.NewServerTLSFromFile(s.certFile, s.keyFile)
if errTLS != nil {
return errTLS
}
s.creds = creds
return nil
}
func (s *Server) Serve() error {
lis, err := net.Listen("tcp", s.addr)
if err != nil {
s.log.Fatal().Err(err).Msg("Failed to listen")
}
if errValidate := s.Validate(); errValidate != nil {
s.log.Fatal().Err(errValidate).Msg("Failed to validate data")
}
var opts []grpclib.ServerOption
if s.tls {
opts = []grpclib.ServerOption{grpclib.Creds(s.creds)}
}
opts = append(opts, grpclib.MaxRecvMsgSize(s.maxRecvMsgSize))
s.ser = grpclib.NewServer(opts...)
tracev1.RegisterTraceServiceServer(s.ser, s)
return s.ser.Serve(lis)
}
func (s *Server) GracefulStop() {
s.log.Info().Msg("stopping")
s.ser.GracefulStop()
}
func (s *Server) computeSeriesID(writeEntity *tracev1.WriteRequest, mapIndexName string) ([]byte, error) {
fieldNames := s.seriesInfo.FieldIndexCompositeSeriesID(mapIndexName)
if fieldNames == nil {
return nil, ErrSeriesEvents
}
var str string
for _, ref := range fieldNames {
field := writeEntity.GetEntity().GetFields()[ref]
switch v := field.GetValueType().(type) {
case *modelv1.Field_StrArray:
for j := 0; j < len(v.StrArray.Value); j++ {
str = str + v.StrArray.Value[j]
}
case *modelv1.Field_IntArray:
for t := 0; t < len(v.IntArray.Value); t++ {
str = str + strconv.FormatInt(v.IntArray.Value[t], 10)
}
case *modelv1.Field_Int:
str = str + strconv.FormatInt(v.Int.Value, 10)
case *modelv1.Field_Str:
str = str + v.Str.Value
}
str = str + ":"
}
if str ==
|
func (s *shardInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.ShardEvent)
if !ok {
s.log.Warn().Msg("invalid e data type")
|
random_line_split
|
trace.go
|
andb/api/event"
databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
tracev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/trace/v1"
apischema "github.com/apache/skywalking-banyandb/api/schema"
"github.com/apache/skywalking-banyandb/banyand/discovery"
"github.com/apache/skywalking-banyandb/banyand/queue"
"github.com/apache/skywalking-banyandb/pkg/bus"
"github.com/apache/skywalking-banyandb/pkg/convert"
"github.com/apache/skywalking-banyandb/pkg/logger"
"github.com/apache/skywalking-banyandb/pkg/partition"
"github.com/apache/skywalking-banyandb/pkg/query/logical"
"github.com/apache/skywalking-banyandb/pkg/run"
)
var (
ErrSeriesEvents = errors.New("no seriesEvent")
ErrShardEvents = errors.New("no shardEvent")
ErrInvalidSeriesID = errors.New("invalid seriesID")
ErrServerCert = errors.New("invalid server cert file")
ErrServerKey = errors.New("invalid server key file")
ErrNoAddr = errors.New("no address")
ErrQueryMsg = errors.New("invalid query message")
defaultRecvSize = 1024 * 1024 * 10
)
type Server struct {
addr string
maxRecvMsgSize int
tls bool
certFile string
keyFile string
log *logger.Logger
ser *grpclib.Server
pipeline queue.Queue
repo discovery.ServiceRepo
shardInfo *shardInfo
seriesInfo *seriesInfo
tracev1.UnimplementedTraceServiceServer
creds credentials.TransportCredentials
}
type shardInfo struct {
log *logger.Logger
shardEventsMap map[string]uint32
sync.RWMutex
}
func (s *shardInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.ShardEvent)
if !ok {
s.log.Warn().Msg("invalid e data type")
return
}
s.setShardNum(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Uint64("shardID", e.Shard.Id).
Msg("received a shard e")
return
}
func (s *shardInfo) setShardNum(eventVal *databasev1.ShardEvent) {
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
idx := eventVal.Shard.Series.GetName() + "-" + eventVal.Shard.Series.GetGroup()
if eventVal.Action == databasev1.Action_ACTION_PUT {
s.shardEventsMap[idx] = eventVal.Shard.Total
} else if eventVal.Action == databasev1.Action_ACTION_DELETE {
delete(s.shardEventsMap, idx)
}
}
func (s *shardInfo) shardNum(idx string) uint32 {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.shardEventsMap[idx]
}
type seriesInfo struct {
log *logger.Logger
seriesEventsMap map[string][]int
sync.RWMutex
}
func (s *seriesInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.SeriesEvent)
if !ok {
s.log.Warn().Msg("invalid e data type")
return
}
s.updateFieldIndexCompositeSeriesID(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Str("name", e.Series.Name).
Str("group", e.Series.Group).
Msg("received a shard e")
return
}
func (s *seriesInfo) updateFieldIndexCompositeSeriesID(seriesEventVal *databasev1.SeriesEvent) {
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
str := seriesEventVal.Series.GetName() + "-" + seriesEventVal.Series.GetGroup()
if seriesEventVal.Action == databasev1.Action_ACTION_PUT {
ana := logical.DefaultAnalyzer()
metadata := common.Metadata{
KindVersion: apischema.SeriesKindVersion,
Spec: seriesEventVal.Series,
}
schema, err := ana.BuildTraceSchema(context.TODO(), metadata)
if err != nil {
s.log.Err(err).Msg("build trace schema")
return
}
fieldRefs, errField := schema.CreateRef(seriesEventVal.FieldNamesCompositeSeriesId...)
if errField != nil {
s.log.Err(errField).Msg("create series ref")
return
}
refIdx := make([]int, len(fieldRefs))
for i, ref := range fieldRefs {
refIdx[i] = ref.Spec.Idx
}
s.seriesEventsMap[str] = refIdx
} else if seriesEventVal.Action == databasev1.Action_ACTION_DELETE {
delete(s.seriesEventsMap, str)
}
}
func (s *seriesInfo)
|
(seriesMeta string) []int {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.seriesEventsMap[seriesMeta]
}
func (s *Server) PreRun() error {
s.log = logger.GetLogger("liaison-grpc")
s.shardInfo.log = s.log
s.seriesInfo.log = s.log
err := s.repo.Subscribe(event.TopicShardEvent, s.shardInfo)
if err != nil {
return err
}
return s.repo.Subscribe(event.TopicSeriesEvent, s.seriesInfo)
}
func NewServer(_ context.Context, pipeline queue.Queue, repo discovery.ServiceRepo) *Server {
return &Server{
pipeline: pipeline,
repo: repo,
shardInfo: &shardInfo{shardEventsMap: make(map[string]uint32)},
seriesInfo: &seriesInfo{seriesEventsMap: make(map[string][]int)},
}
}
func (s *Server) Name() string {
return "grpc"
}
func (s *Server) FlagSet() *run.FlagSet {
fs := run.NewFlagSet("grpc")
fs.IntVarP(&s.maxRecvMsgSize, "max-recv-msg-size", "", defaultRecvSize, "The size of max receiving message")
fs.BoolVarP(&s.tls, "tls", "", true, "Connection uses TLS if true, else plain TCP")
fs.StringVarP(&s.certFile, "cert-file", "", "server_cert.pem", "The TLS cert file")
fs.StringVarP(&s.keyFile, "key-file", "", "server_key.pem", "The TLS key file")
fs.StringVarP(&s.addr, "addr", "", ":17912", "The address of banyand listens")
return fs
}
func (s *Server) Validate() error {
if s.addr == "" {
return ErrNoAddr
}
if !s.tls {
return nil
}
if s.certFile == "" {
return ErrServerCert
}
if s.keyFile == "" {
return ErrServerKey
}
creds, errTLS := credentials.NewServerTLSFromFile(s.certFile, s.keyFile)
if errTLS != nil {
return errTLS
}
s.creds = creds
return nil
}
func (s *Server) Serve() error {
lis, err := net.Listen("tcp", s.addr)
if err != nil {
s.log.Fatal().Err(err).Msg("Failed to listen")
}
if errValidate := s.Validate(); errValidate != nil {
s.log.Fatal().Err(errValidate).Msg("Failed to validate data")
}
var opts []grpclib.ServerOption
if s.tls {
opts = []grpclib.ServerOption{grpclib.Creds(s.creds)}
}
opts = append(opts, grpclib.MaxRecvMsgSize(s.maxRecvMsgSize))
s.ser = grpclib.NewServer(opts...)
tracev1.RegisterTraceServiceServer(s.ser, s)
return s.ser.Serve(lis)
}
func (s *Server) GracefulStop() {
s.log.Info().Msg("stopping")
s.ser.GracefulStop()
}
func (s *Server) computeSeriesID(writeEntity *tracev1.WriteRequest, mapIndexName string) ([]byte, error) {
fieldNames := s.seriesInfo.FieldIndexCompositeSeriesID(mapIndexName)
if fieldNames == nil {
return nil, ErrSeriesEvents
}
var str string
for _, ref := range fieldNames {
field := writeEntity.GetEntity().GetFields()[ref]
switch v := field.GetValueType().(type) {
case *modelv1.Field_StrArray:
for j := 0; j < len(v.StrArray.Value); j++ {
str = str + v.StrArray.Value[j]
}
case *modelv1.Field_IntArray:
for t := 0; t < len(v.IntArray.Value); t++ {
str = str + strconv.FormatInt(v.IntArray.Value[t], 10)
}
case *modelv1.Field_Int:
str = str + strconv.FormatInt(v.Int.Value, 10)
case *modelv1.Field_Str:
str = str + v.Str.Value
}
str = str + ":"
}
if str
|
FieldIndexCompositeSeriesID
|
identifier_name
|
trace.go
|
b/api/event"
databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
tracev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/trace/v1"
apischema "github.com/apache/skywalking-banyandb/api/schema"
"github.com/apache/skywalking-banyandb/banyand/discovery"
"github.com/apache/skywalking-banyandb/banyand/queue"
"github.com/apache/skywalking-banyandb/pkg/bus"
"github.com/apache/skywalking-banyandb/pkg/convert"
"github.com/apache/skywalking-banyandb/pkg/logger"
"github.com/apache/skywalking-banyandb/pkg/partition"
"github.com/apache/skywalking-banyandb/pkg/query/logical"
"github.com/apache/skywalking-banyandb/pkg/run"
)
var (
ErrSeriesEvents = errors.New("no seriesEvent")
ErrShardEvents = errors.New("no shardEvent")
ErrInvalidSeriesID = errors.New("invalid seriesID")
ErrServerCert = errors.New("invalid server cert file")
ErrServerKey = errors.New("invalid server key file")
ErrNoAddr = errors.New("no address")
ErrQueryMsg = errors.New("invalid query message")
defaultRecvSize = 1024 * 1024 * 10
)
type Server struct {
addr string
maxRecvMsgSize int
tls bool
certFile string
keyFile string
log *logger.Logger
ser *grpclib.Server
pipeline queue.Queue
repo discovery.ServiceRepo
shardInfo *shardInfo
seriesInfo *seriesInfo
tracev1.UnimplementedTraceServiceServer
creds credentials.TransportCredentials
}
type shardInfo struct {
log *logger.Logger
shardEventsMap map[string]uint32
sync.RWMutex
}
func (s *shardInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.ShardEvent)
if !ok {
s.log.Warn().Msg("invalid event data type")
return
}
s.setShardNum(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Uint64("shardID", e.Shard.Id).
Msg("received a shard event")
return
}
func (s *shardInfo) setShardNum(eventVal *databasev1.ShardEvent)
|
func (s *shardInfo) shardNum(idx string) uint32 {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.shardEventsMap[idx]
}
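shardNum only exposes the shard total stored by setShardNum; mapping a series key to a concrete shard happens elsewhere (the file imports pkg/partition for that). The snippet below is only a hedged sketch of the idea, with an FNV hash standing in for the real partitioning logic:
// Sketch only: pick a shard index for a name-group key given the total kept by shardInfo.
package main

import (
    "fmt"
    "hash/fnv"
)

func pickShard(seriesKey string, shardTotal uint32) uint32 {
    if shardTotal == 0 {
        return 0
    }
    h := fnv.New32a()
    _, _ = h.Write([]byte(seriesKey))
    return h.Sum32() % shardTotal
}

func main() {
    // "sw-default" mirrors the name-group key format built in setShardNum (assumed value).
    fmt.Println(pickShard("sw-default", 2))
}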
type seriesInfo struct {
log *logger.Logger
seriesEventsMap map[string][]int
sync.RWMutex
}
func (s *seriesInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.SeriesEvent)
if !ok {
s.log.Warn().Msg("invalid event data type")
return
}
s.updateFieldIndexCompositeSeriesID(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Str("name", e.Series.Name).
Str("group", e.Series.Group).
Msg("received a series event")
return
}
func (s *seriesInfo) updateFieldIndexCompositeSeriesID(seriesEventVal *databasev1.SeriesEvent) {
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
str := seriesEventVal.Series.GetName() + "-" + seriesEventVal.Series.GetGroup()
if seriesEventVal.Action == databasev1.Action_ACTION_PUT {
ana := logical.DefaultAnalyzer()
metadata := common.Metadata{
KindVersion: apischema.SeriesKindVersion,
Spec: seriesEventVal.Series,
}
schema, err := ana.BuildTraceSchema(context.TODO(), metadata)
if err != nil {
s.log.Err(err).Msg("build trace schema")
return
}
fieldRefs, errField := schema.CreateRef(seriesEventVal.FieldNamesCompositeSeriesId...)
if errField != nil {
s.log.Err(errField).Msg("create series ref")
return
}
refIdx := make([]int, len(fieldRefs))
for i, ref := range fieldRefs {
refIdx[i] = ref.Spec.Idx
}
s.seriesEventsMap[str] = refIdx
} else if seriesEventVal.Action == databasev1.Action_ACTION_DELETE {
delete(s.seriesEventsMap, str)
}
}
func (s *seriesInfo) FieldIndexCompositeSeriesID(seriesMeta string) []int {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.seriesEventsMap[seriesMeta]
}
func (s *Server) PreRun() error {
s.log = logger.GetLogger("liaison-grpc")
s.shardInfo.log = s.log
s.seriesInfo.log = s.log
err := s.repo.Subscribe(event.TopicShardEvent, s.shardInfo)
if err != nil {
return err
}
return s.repo.Subscribe(event.TopicSeriesEvent, s.seriesInfo)
}
func NewServer(_ context.Context, pipeline queue.Queue, repo discovery.ServiceRepo) *Server {
return &Server{
pipeline: pipeline,
repo: repo,
shardInfo: &shardInfo{shardEventsMap: make(map[string]uint32)},
seriesInfo: &seriesInfo{seriesEventsMap: make(map[string][]int)},
}
}
func (s *Server) Name() string {
return "grpc"
}
func (s *Server) FlagSet() *run.FlagSet {
fs := run.NewFlagSet("grpc")
fs.IntVarP(&s.maxRecvMsgSize, "max-recv-msg-size", "", defaultRecvSize, "The maximum size of a received message")
fs.BoolVarP(&s.tls, "tls", "", true, "Connection uses TLS if true, else plain TCP")
fs.StringVarP(&s.certFile, "cert-file", "", "server_cert.pem", "The TLS cert file")
fs.StringVarP(&s.keyFile, "key-file", "", "server_key.pem", "The TLS key file")
fs.StringVarP(&s.addr, "addr", "", ":17912", "The address banyand listens on")
return fs
}
func (s *Server) Validate() error {
if s.addr == "" {
return ErrNoAddr
}
if !s.tls {
return nil
}
if s.certFile == "" {
return ErrServerCert
}
if s.keyFile == "" {
return ErrServerKey
}
creds, errTLS := credentials.NewServerTLSFromFile(s.certFile, s.keyFile)
if errTLS != nil {
return errTLS
}
s.creds = creds
return nil
}
func (s *Server) Serve() error {
lis, err := net.Listen("tcp", s.addr)
if err != nil {
s.log.Fatal().Err(err).Msg("Failed to listen")
}
if errValidate := s.Validate(); errValidate != nil {
s.log.Fatal().Err(errValidate).Msg("Failed to validate data")
}
var opts []grpclib.ServerOption
if s.tls {
opts = []grpclib.ServerOption{grpclib.Creds(s.creds)}
}
opts = append(opts, grpclib.MaxRecvMsgSize(s.maxRecvMsgSize))
s.ser = grpclib.NewServer(opts...)
tracev1.RegisterTraceServiceServer(s.ser, s)
return s.ser.Serve(lis)
}
func (s *Server) GracefulStop() {
s.log.Info().Msg("stopping")
s.ser.GracefulStop()
}
func (s *Server) computeSeriesID(writeEntity *tracev1.WriteRequest, mapIndexName string) ([]byte, error) {
fieldNames := s.seriesInfo.FieldIndexCompositeSeriesID(mapIndexName)
if fieldNames == nil {
return nil, ErrSeriesEvents
}
var str string
for _, ref := range fieldNames {
field := writeEntity.GetEntity().GetFields()[ref]
switch v := field.GetValueType().(type) {
case *modelv1.Field_StrArray:
for j := 0; j < len(v.StrArray.Value); j++ {
str = str + v.StrArray.Value[j]
}
case *modelv1.Field_IntArray:
for t := 0; t < len(v.IntArray.Value); t++ {
str = str + strconv.FormatInt(v.IntArray.Value[t], 10)
}
case *modelv1.Field_Int:
str = str + strconv.FormatInt(v.Int.Value, 10)
case *modelv1.Field_Str:
str = str + v.Str.Value
}
str = str + ":"
}
if str
|
{
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
idx := eventVal.Shard.Series.GetName() + "-" + eventVal.Shard.Series.GetGroup()
if eventVal.Action == databasev1.Action_ACTION_PUT {
s.shardEventsMap[idx] = eventVal.Shard.Total
} else if eventVal.Action == databasev1.Action_ACTION_DELETE {
delete(s.shardEventsMap, idx)
}
}
|
identifier_body
|
trace.go
|
b/api/event"
databasev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/database/v1"
modelv1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/model/v1"
tracev1 "github.com/apache/skywalking-banyandb/api/proto/banyandb/trace/v1"
apischema "github.com/apache/skywalking-banyandb/api/schema"
"github.com/apache/skywalking-banyandb/banyand/discovery"
"github.com/apache/skywalking-banyandb/banyand/queue"
"github.com/apache/skywalking-banyandb/pkg/bus"
"github.com/apache/skywalking-banyandb/pkg/convert"
"github.com/apache/skywalking-banyandb/pkg/logger"
"github.com/apache/skywalking-banyandb/pkg/partition"
"github.com/apache/skywalking-banyandb/pkg/query/logical"
"github.com/apache/skywalking-banyandb/pkg/run"
)
var (
ErrSeriesEvents = errors.New("no seriesEvent")
ErrShardEvents = errors.New("no shardEvent")
ErrInvalidSeriesID = errors.New("invalid seriesID")
ErrServerCert = errors.New("invalid server cert file")
ErrServerKey = errors.New("invalid server key file")
ErrNoAddr = errors.New("no address")
ErrQueryMsg = errors.New("invalid query message")
defaultRecvSize = 1024 * 1024 * 10
)
type Server struct {
addr string
maxRecvMsgSize int
tls bool
certFile string
keyFile string
log *logger.Logger
ser *grpclib.Server
pipeline queue.Queue
repo discovery.ServiceRepo
shardInfo *shardInfo
seriesInfo *seriesInfo
tracev1.UnimplementedTraceServiceServer
creds credentials.TransportCredentials
}
type shardInfo struct {
log *logger.Logger
shardEventsMap map[string]uint32
sync.RWMutex
}
func (s *shardInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.ShardEvent)
if !ok {
s.log.Warn().Msg("invalid event data type")
return
}
s.setShardNum(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Uint64("shardID", e.Shard.Id).
Msg("received a shard event")
return
}
func (s *shardInfo) setShardNum(eventVal *databasev1.ShardEvent) {
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
idx := eventVal.Shard.Series.GetName() + "-" + eventVal.Shard.Series.GetGroup()
if eventVal.Action == databasev1.Action_ACTION_PUT {
s.shardEventsMap[idx] = eventVal.Shard.Total
} else if eventVal.Action == databasev1.Action_ACTION_DELETE
|
}
func (s *shardInfo) shardNum(idx string) uint32 {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.shardEventsMap[idx]
}
type seriesInfo struct {
log *logger.Logger
seriesEventsMap map[string][]int
sync.RWMutex
}
func (s *seriesInfo) Rev(message bus.Message) (resp bus.Message) {
e, ok := message.Data().(*databasev1.SeriesEvent)
if !ok {
s.log.Warn().Msg("invalid event data type")
return
}
s.updateFieldIndexCompositeSeriesID(e)
s.log.Info().
Str("action", databasev1.Action_name[int32(e.Action)]).
Str("name", e.Series.Name).
Str("group", e.Series.Group).
Msg("received a series event")
return
}
func (s *seriesInfo) updateFieldIndexCompositeSeriesID(seriesEventVal *databasev1.SeriesEvent) {
s.RWMutex.Lock()
defer s.RWMutex.Unlock()
str := seriesEventVal.Series.GetName() + "-" + seriesEventVal.Series.GetGroup()
if seriesEventVal.Action == databasev1.Action_ACTION_PUT {
ana := logical.DefaultAnalyzer()
metadata := common.Metadata{
KindVersion: apischema.SeriesKindVersion,
Spec: seriesEventVal.Series,
}
schema, err := ana.BuildTraceSchema(context.TODO(), metadata)
if err != nil {
s.log.Err(err).Msg("build trace schema")
return
}
fieldRefs, errField := schema.CreateRef(seriesEventVal.FieldNamesCompositeSeriesId...)
if errField != nil {
s.log.Err(errField).Msg("create series ref")
return
}
refIdx := make([]int, len(fieldRefs))
for i, ref := range fieldRefs {
refIdx[i] = ref.Spec.Idx
}
s.seriesEventsMap[str] = refIdx
} else if seriesEventVal.Action == databasev1.Action_ACTION_DELETE {
delete(s.seriesEventsMap, str)
}
}
func (s *seriesInfo) FieldIndexCompositeSeriesID(seriesMeta string) []int {
s.RWMutex.RLock()
defer s.RWMutex.RUnlock()
return s.seriesEventsMap[seriesMeta]
}
func (s *Server) PreRun() error {
s.log = logger.GetLogger("liaison-grpc")
s.shardInfo.log = s.log
s.seriesInfo.log = s.log
err := s.repo.Subscribe(event.TopicShardEvent, s.shardInfo)
if err != nil {
return err
}
return s.repo.Subscribe(event.TopicSeriesEvent, s.seriesInfo)
}
func NewServer(_ context.Context, pipeline queue.Queue, repo discovery.ServiceRepo) *Server {
return &Server{
pipeline: pipeline,
repo: repo,
shardInfo: &shardInfo{shardEventsMap: make(map[string]uint32)},
seriesInfo: &seriesInfo{seriesEventsMap: make(map[string][]int)},
}
}
func (s *Server) Name() string {
return "grpc"
}
func (s *Server) FlagSet() *run.FlagSet {
fs := run.NewFlagSet("grpc")
fs.IntVarP(&s.maxRecvMsgSize, "max-recv-msg-size", "", defaultRecvSize, "The maximum size of a received message")
fs.BoolVarP(&s.tls, "tls", "", true, "Connection uses TLS if true, else plain TCP")
fs.StringVarP(&s.certFile, "cert-file", "", "server_cert.pem", "The TLS cert file")
fs.StringVarP(&s.keyFile, "key-file", "", "server_key.pem", "The TLS key file")
fs.StringVarP(&s.addr, "addr", "", ":17912", "The address banyand listens on")
return fs
}
func (s *Server) Validate() error {
if s.addr == "" {
return ErrNoAddr
}
if !s.tls {
return nil
}
if s.certFile == "" {
return ErrServerCert
}
if s.keyFile == "" {
return ErrServerKey
}
creds, errTLS := credentials.NewServerTLSFromFile(s.certFile, s.keyFile)
if errTLS != nil {
return errTLS
}
s.creds = creds
return nil
}
func (s *Server) Serve() error {
lis, err := net.Listen("tcp", s.addr)
if err != nil {
s.log.Fatal().Err(err).Msg("Failed to listen")
}
if errValidate := s.Validate(); errValidate != nil {
s.log.Fatal().Err(errValidate).Msg("Failed to validate data")
}
var opts []grpclib.ServerOption
if s.tls {
opts = []grpclib.ServerOption{grpclib.Creds(s.creds)}
}
opts = append(opts, grpclib.MaxRecvMsgSize(s.maxRecvMsgSize))
s.ser = grpclib.NewServer(opts...)
tracev1.RegisterTraceServiceServer(s.ser, s)
return s.ser.Serve(lis)
}
func (s *Server) GracefulStop() {
s.log.Info().Msg("stopping")
s.ser.GracefulStop()
}
func (s *Server) computeSeriesID(writeEntity *tracev1.WriteRequest, mapIndexName string) ([]byte, error) {
fieldNames := s.seriesInfo.FieldIndexCompositeSeriesID(mapIndexName)
if fieldNames == nil {
return nil, ErrSeriesEvents
}
var str string
for _, ref := range fieldNames {
field := writeEntity.GetEntity().GetFields()[ref]
switch v := field.GetValueType().(type) {
case *modelv1.Field_StrArray:
for j := 0; j < len(v.StrArray.Value); j++ {
str = str + v.StrArray.Value[j]
}
case *modelv1.Field_IntArray:
for t := 0; t < len(v.IntArray.Value); t++ {
str = str + strconv.FormatInt(v.IntArray.Value[t], 10)
}
case *modelv1.Field_Int:
str = str + strconv.FormatInt(v.Int.Value, 10)
case *modelv1.Field_Str:
str = str + v.Str.Value
}
str = str + ":"
}
if str
|
{
delete(s.shardEventsMap, idx)
}
|
conditional_block
|
raft.go
|
() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
}
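The commented-out examples above already outline the labgob pattern. Below is a hedged, filled-in sketch that assumes the persistent state is currentTerm, votedFor and rf.Logs (per Figure 2 of the Raft paper); the helper names are invented so they do not clash with persist/readPersist, and the "bytes" import is assumed.
func (rf *Raft) persistSketch() {
    w := new(bytes.Buffer)
    e := labgob.NewEncoder(w)
    e.Encode(rf.currentTerm) // assumed persistent fields, per Figure 2
    e.Encode(rf.votedFor)
    e.Encode(rf.Logs)
    rf.persister.SaveRaftState(w.Bytes())
}

func (rf *Raft) readPersistSketch(data []byte) {
    if data == nil || len(data) < 1 {
        return
    }
    r := bytes.NewBuffer(data)
    d := labgob.NewDecoder(r)
    var currentTerm, votedFor int
    var logs []LogEntry
    if d.Decode(&currentTerm) != nil ||
        d.Decode(&votedFor) != nil ||
        d.Decode(&logs) != nil {
        return // decode failed; keep the zero-value state
    }
    rf.currentTerm = currentTerm
    rf.votedFor = votedFor
    rf.Logs = logs
}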
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
type AppendEntriesRequest struct {
Src int
Term int
}
type AppendEntriesResponse struct {
Term int
Success bool
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
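Because Call() already handles loss and timeouts, a candidate can simply fan out sendRequestVote in goroutines and tally the replies. The sketch below illustrates that pattern; rf.me and the buffered channel are assumptions layered on top of the code shown in this file.
func (rf *Raft) requestVotesSketch(term int) int {
    last := rf.GetLastLogEntry()
    votes := make(chan bool, len(rf.peers))
    for i := range rf.peers {
        if i == rf.me { // rf.me is assumed to be the usual lab-skeleton field
            votes <- true // vote for self
            continue
        }
        go func(peer int) {
            args := RequestVoteArgs{Term: term, CandidateId: rf.me,
                LastLogIndex: last.Index, LastLogTerm: last.Term}
            reply := RequestVoteReply{}
            if rf.sendRequestVote(peer, &args, &reply) {
                votes <- rf.handleRequestVoteResponse(args, reply)
            } else {
                votes <- false // dead peer, lost request or lost reply
            }
        }(i)
    }
    granted := 0
    for range rf.peers {
        if <-votes {
            granted++
        }
    }
    return granted // caller would compare against len(rf.peers)/2 + 1
}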
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
index := -1
term := -1
isLeader := true
// Your code here (2B).
return index, term, isLeader
}
//
// the tester doesn't halt goroutines created by Raft after each test,
// but it does call the Kill() method. your code can use killed() to
// check whether Kill() has been called. the use of atomic avoids the
// need for a lock.
//
// the issue is that long-running goroutines use memory and may chew
// up CPU time, perhaps causing later tests to fail and generating
// confusing debug output. any goroutine with a long-running loop
// should call killed() to check whether it should stop.
//
func (rf *Raft) Kill() {
atomic.StoreInt32(&rf.dead, 1)
// Your code here, if desired.
}
func (rf *Raft) killed() bool {
z := atomic.LoadInt32(&rf.dead)
return z == 1
}
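As the comment above suggests, any long-running goroutine should poll killed() so the tester can reclaim it. A minimal sketch of that loop (the sleep interval is arbitrary):
func (rf *Raft) backgroundLoopSketch() {
    for !rf.killed() {
        // periodic work, e.g. checking the election timeout, goes here
        time.Sleep(10 * time.Millisecond)
    }
}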
func (rf *Raft) becomeFollowerWithLock() {
if rf.role == LEADER {
rf.role = FOLLOWER
go rf.electionTimer()
} else if rf.role == CANDIDATE {
rf.role = FOLLOWER
}
}
func (rf *Raft) updateTerm(term int) {
rf.mu.Lock()
if term > rf.currentTerm {
rf.currentTerm = term
rf.votedFor = -1
rf.leaderId = -1
rf.becomeFollowerWithLock()
}
rf.mu.Unlock()
}
func (rf *Raft) VotedFor() int {
voted := -1
rf.mu.Lock()
voted = rf.votedFor
rf.mu.Unlock()
return voted
}
func (rf *Raft) setVotedFor(index int) {
rf.mu.Lock()
rf.votedFor = index
rf.mu.Unlock()
}
func (rf *Raft) GetLastLogEntryWithLock() LogEntry {
entry := LogEntry{}
if len(rf.Logs) == 0 {
entry.Term = rf.currentTerm
entry.Index = 0
} else {
entry = rf.Logs[len(rf.Logs)-1]
}
return entry
}
func (rf *Raft) GetLastLogEntry() LogEntry {
entry := LogEntry{}
rf.mu.Lock()
entry = rf.GetLastLogEntryWithLock()
rf.mu.Unlock()
return entry
}
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
rf.updateTerm(args.Term)
rf.mu.Lock()
reply.Term = rf.currentTerm
if args.Term < rf.currentTerm {
reply.VoteGranted = false
} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {
reply.VoteGranted = true
rf.votedFor = args.CandidateId
rf.lastHeartBeatRecieved = time.Now()
}
rf.mu.Unlock()
}
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
func (rf *Raft) handleRequestVoteResponse(request RequestVoteArgs, reply RequestVoteReply) bool {
rf.updateTerm(reply.Term)
rf.mu.Lock()
if rf.currentTerm != request.Term {
rf.mu.Unlock()
return false
}
granted := reply.VoteGranted
rf.mu.Unlock()
return granted
}
func (rf *Raft) setLastHeartBeatRecieved(recievedTime time.Time) {
rf.mu.Lock()
rf.lastHeartBeatRecieved = recievedTime
rf.mu.Unlock()
}
func (rf *Raft) AppendEntries(args *AppendEntriesRequest, reply *AppendEntriesResponse) {
term := args.Term
rf.updateTerm(term)
rf.mu.Lock()
if term == rf.currentTerm {
if rf.role == CANDIDATE {
rf.becomeFollowerWithLock()
}
rf.leaderId = args.Src
rf.lastHeartBeatRecieved = time.Now()
rf.currentTerm = term
}
rf.mu.Unlock()
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesRequest, reply *AppendEntriesResponse) bool {
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
return ok
}
func (rf *Raft) handleAppendEntriesResponse(request AppendEntriesRequest, reply AppendEntriesResponse) {
rf.updateTerm(reply.Term)
rf.mu.Lock()
if request.Term != rf.currentTerm {
rf.mu.Unlock()
return
}
rf.mu.Unlock()
}
func (rf *Raft) heartBeatTimer() {
for rf.Role() == LEADER
|
{
for i := 0; i < len(rf.peers); i++ {
wg.Add(1)
go func(index int, request AppendEntriesRequest, reply AppendEntriesResponse) {
ok := rf.sendAppendEntries(index, &request, &reply)
if ok {
rf.handleAppendEntriesResponse(request, reply)
}
wg.Done()
}(i, request, reply)
}
time.Sleep(time.Millisecond * 150)
}
|
conditional_block
|
|
raft.go
|
ANDIDATE
FOLLOWER
)
// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
var term int
var isleader bool
{
rf.mu.Lock()
term = int(rf.currentTerm)
if rf.role == LEADER {
isleader = true
} else {
isleader = false
}
rf.mu.Unlock()
}
return term, isleader
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here (2C).
|
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
type AppendEntriesRequest struct {
Src int
Term int
}
type AppendEntriesResponse struct {
Term int
Success bool
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
index := -1
term := -1
isLeader := true
// Your code here (2B).
return index, term, isLeader
}
//
// the tester doesn't halt goroutines created by Raft after each test,
// but it does call the Kill() method. your code can use killed() to
// check whether Kill() has been called. the use of atomic avoids the
// need for a lock.
//
// the issue is that long-running goroutines use memory and may chew
// up CPU time, perhaps causing later tests to fail and generating
// confusing debug output. any goroutine with a long-running loop
// should call killed() to check whether it should stop.
//
func (rf *Raft) Kill() {
atomic.StoreInt32(&rf.dead, 1)
// Your code here, if desired.
}
func (rf *Raft) killed() bool {
z := atomic.LoadInt32(&rf.dead)
return z == 1
}
func (rf *Raft) becomeFollowerWithLock() {
if rf.role == LEADER {
rf.role = FOLLOWER
go rf.electionTimer()
} else if rf.role == CANDIDATE {
rf.role = FOLLOWER
}
}
func (rf *Raft) updateTerm(term int) {
rf.mu.Lock()
if term > rf.currentTerm {
rf.currentTerm = term
rf.votedFor = -1
rf.leaderId = -1
rf.becomeFollowerWithLock()
}
rf.mu.Unlock()
}
func (rf *Raft) VotedFor() int {
voted := -1
rf.mu.Lock()
voted = rf.votedFor
rf.mu.Unlock()
return voted
}
func (rf *Raft) setVotedFor(index int) {
rf.mu.Lock()
rf.votedFor = index
rf.mu.Unlock()
}
func (rf *Raft) GetLastLogEntryWithLock() LogEntry {
entry := LogEntry{}
if len(rf.Logs) == 0 {
entry.Term = rf.currentTerm
entry.Index = 0
} else {
entry = rf.Logs[len(rf.Logs)-1]
}
return entry
}
func (rf *Raft) GetLastLogEntry() LogEntry {
entry := LogEntry{}
rf.mu.Lock()
entry = rf.GetLastLogEntryWithLock()
rf.mu.Unlock()
return entry
}
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
rf.updateTerm(args.Term)
rf.mu.Lock()
reply.Term = rf.currentTerm
if args.Term < rf.currentTerm {
reply.VoteGranted = false
} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {
reply.VoteGranted = true
rf.votedFor = args.CandidateId
rf.lastHeartBeatRecieved = time.Now()
}
rf.mu.Unlock()
}
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
func (rf *Raft) handleRequestVoteResponse(request RequestVoteArgs, reply RequestVoteReply) bool {
rf.updateTerm(reply.Term)
rf.mu.Lock()
if rf.currentTerm != request.Term {
rf.mu.Unlock()
return false
}
granted := reply.VoteGranted
rf.mu.Unlock()
return granted
}
func (rf *Raft) setLastHeartBeatRecieved(recievedTime time.Time) {
rf.mu.Lock()
rf.lastHeartBeatRecieved = recievedTime
rf.mu.Unlock()
}
func (rf *Raft) AppendEntries(args *AppendEntriesRequest, reply *AppendEntriesResponse) {
term := args.Term
rf.updateTerm(term)
rf.mu.Lock()
if term == rf.currentTerm {
if rf.role == CANDIDATE {
rf.becomeFollowerWithLock()
}
rf.leaderId = args.Src
rf.lastHeartBeatRecieved = time.Now()
rf.currentTerm = term
}
rf.mu.Unlock()
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesRequest, reply *AppendEntriesResponse) bool {
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
return ok
}
func (rf *Raft) handleAppendEntriesResponse(request AppendEntriesRequest, reply
|
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
|
random_line_split
|
raft.go
|
IDATE
FOLLOWER
)
// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
var term int
var isleader bool
{
rf.mu.Lock()
term = int(rf.currentTerm)
if rf.role == LEADER {
isleader = true
} else {
isleader = false
}
rf.mu.Unlock()
}
return term, isleader
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
type AppendEntriesRequest struct {
Src int
Term int
}
type AppendEntriesResponse struct {
Term int
Success bool
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
index := -1
term := -1
isLeader := true
// Your code here (2B).
return index, term, isLeader
}
//
// the tester doesn't halt goroutines created by Raft after each test,
// but it does call the Kill() method. your code can use killed() to
// check whether Kill() has been called. the use of atomic avoids the
// need for a lock.
//
// the issue is that long-running goroutines use memory and may chew
// up CPU time, perhaps causing later tests to fail and generating
// confusing debug output. any goroutine with a long-running loop
// should call killed() to check whether it should stop.
//
func (rf *Raft) Kill() {
atomic.StoreInt32(&rf.dead, 1)
// Your code here, if desired.
}
func (rf *Raft) killed() bool {
z := atomic.LoadInt32(&rf.dead)
return z == 1
}
func (rf *Raft) becomeFollowerWithLock() {
if rf.role == LEADER {
rf.role = FOLLOWER
go rf.electionTimer()
} else if rf.role == CANDIDATE {
rf.role = FOLLOWER
}
}
func (rf *Raft) updateTerm(term int) {
rf.mu.Lock()
if term > rf.currentTerm {
rf.currentTerm = term
rf.votedFor = -1
rf.leaderId = -1
rf.becomeFollowerWithLock()
}
rf.mu.Unlock()
}
func (rf *Raft) VotedFor() int {
voted := -1
rf.mu.Lock()
voted = rf.votedFor
rf.mu.Unlock()
return voted
}
func (rf *Raft) setVotedFor(index int) {
rf.mu.Lock()
rf.votedFor = index
rf.mu.Unlock()
}
func (rf *Raft) GetLastLogEntryWithLock() LogEntry {
entry := LogEntry{}
if len(rf.Logs) == 0 {
entry.Term = rf.currentTerm
entry.Index = 0
} else {
entry = rf.Logs[len(rf.Logs)-1]
}
return entry
}
func (rf *Raft) GetLastLogEntry() LogEntry {
entry := LogEntry{}
rf.mu.Lock()
entry = rf.GetLastLogEntryWithLock()
rf.mu.Unlock()
return entry
}
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
rf.updateTerm(args.Term)
rf.mu.Lock()
reply.Term = rf.currentTerm
if args.Term < rf.currentTerm {
reply.VoteGranted = false
} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {
reply.VoteGranted = true
rf.votedFor = args.CandidateId
rf.lastHeartBeatRecieved = time.Now()
}
rf.mu.Unlock()
}
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
func (rf *Raft) handleRequestVoteResponse(request RequestVoteArgs, reply RequestVoteReply) bool {
rf.updateTerm(reply.Term)
rf.mu.Lock()
if rf.currentTerm != request.Term {
rf.mu.Unlock()
return false
}
granted := reply.VoteGranted
rf.mu.Unlock()
return granted
}
func (rf *Raft) setLastHeartBeatRecieved(recievedTime time.Time) {
rf.mu.Lock()
rf.lastHeartBeatRecieved = recievedTime
rf.mu.Unlock()
}
func (rf *Raft) AppendEntries(args *AppendEntriesRequest, reply *AppendEntriesResponse) {
term := args.Term
rf.updateTerm(term)
rf.mu.Lock()
if term == rf.currentTerm {
if rf.role == CANDIDATE {
rf.becomeFollowerWithLock()
}
rf.leaderId = args.Src
rf.lastHeartBeatRecieved = time.Now()
rf.currentTerm = term
}
rf.mu.Unlock()
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesRequest, reply *AppendEntriesResponse) bool
|
func (rf *Raft) handleAppendEntriesResponse(request AppendEntriesRequest,
|
{
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
return ok
}
|
identifier_body
|
raft.go
|
IDATE
FOLLOWER
)
// return currentTerm and whether this server
// believes it is the leader.
func (rf *Raft) GetState() (int, bool) {
var term int
var isleader bool
{
rf.mu.Lock()
term = int(rf.currentTerm)
if rf.role == LEADER {
isleader = true
} else {
isleader = false
}
rf.mu.Unlock()
}
return term, isleader
}
//
// save Raft's persistent state to stable storage,
// where it can later be retrieved after a crash and restart.
// see paper's Figure 2 for a description of what should be persistent.
//
func (rf *Raft) persist() {
// Your code here (2C).
// Example:
// w := new(bytes.Buffer)
// e := labgob.NewEncoder(w)
// e.Encode(rf.xxx)
// e.Encode(rf.yyy)
// data := w.Bytes()
// rf.persister.SaveRaftState(data)
}
//
// restore previously persisted state.
//
func (rf *Raft) readPersist(data []byte) {
if data == nil || len(data) < 1 { // bootstrap without any state?
return
}
// Your code here (2C).
// Example:
// r := bytes.NewBuffer(data)
// d := labgob.NewDecoder(r)
// var xxx
// var yyy
// if d.Decode(&xxx) != nil ||
// d.Decode(&yyy) != nil {
// error...
// } else {
// rf.xxx = xxx
// rf.yyy = yyy
// }
}
//
// example RequestVote RPC arguments structure.
// field names must start with capital letters!
//
type RequestVoteArgs struct {
// Your data here (2A, 2B).
Term int
CandidateId int
LastLogIndex int
LastLogTerm int
}
//
// example RequestVote RPC reply structure.
// field names must start with capital letters!
//
type RequestVoteReply struct {
// Your data here (2A).
Term int
VoteGranted bool
}
type AppendEntriesRequest struct {
Src int
Term int
}
type AppendEntriesResponse struct {
Term int
Success bool
}
//
// example code to send a RequestVote RPC to a server.
// server is the index of the target server in rf.peers[].
// expects RPC arguments in args.
// fills in *reply with RPC reply, so caller should
// pass &reply.
// the types of the args and reply passed to Call() must be
// the same as the types of the arguments declared in the
// handler function (including whether they are pointers).
//
// The labrpc package simulates a lossy network, in which servers
// may be unreachable, and in which requests and replies may be lost.
// Call() sends a request and waits for a reply. If a reply arrives
// within a timeout interval, Call() returns true; otherwise
// Call() returns false. Thus Call() may not return for a while.
// A false return can be caused by a dead server, a live server that
// can't be reached, a lost request, or a lost reply.
//
// Call() is guaranteed to return (perhaps after a delay) *except* if the
// handler function on the server side does not return. Thus there
// is no need to implement your own timeouts around Call().
//
// look at the comments in ../labrpc/labrpc.go for more details.
//
// if you're having trouble getting RPC to work, check that you've
// capitalized all field names in structs passed over RPC, and
// that the caller passes the address of the reply struct with &, not
// the struct itself.
//
func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
//
// the service using Raft (e.g. a k/v server) wants to start
// agreement on the next command to be appended to Raft's log. if this
// server isn't the leader, returns false. otherwise start the
// agreement and return immediately. there is no guarantee that this
// command will ever be committed to the Raft log, since the leader
// may fail or lose an election. even if the Raft instance has been killed,
// this function should return gracefully.
//
// the first return value is the index that the command will appear at
// if it's ever committed. the second return value is the current
// term. the third return value is true if this server believes it is
// the leader.
//
func (rf *Raft) Start(command interface{}) (int, int, bool) {
index := -1
term := -1
isLeader := true
// Your code here (2B).
return index, term, isLeader
}
//
// the tester doesn't halt goroutines created by Raft after each test,
// but it does call the Kill() method. your code can use killed() to
// check whether Kill() has been called. the use of atomic avoids the
// need for a lock.
//
// the issue is that long-running goroutines use memory and may chew
// up CPU time, perhaps causing later tests to fail and generating
// confusing debug output. any goroutine with a long-running loop
// should call killed() to check whether it should stop.
//
func (rf *Raft) Kill() {
atomic.StoreInt32(&rf.dead, 1)
// Your code here, if desired.
}
func (rf *Raft) killed() bool {
z := atomic.LoadInt32(&rf.dead)
return z == 1
}
func (rf *Raft) becomeFollowerWithLock() {
if rf.role == LEADER {
rf.role = FOLLOWER
go rf.electionTimer()
} else if rf.role == CANDIDATE {
rf.role = FOLLOWER
}
}
func (rf *Raft) updateTerm(term int) {
rf.mu.Lock()
if term > rf.currentTerm {
rf.currentTerm = term
rf.votedFor = -1
rf.leaderId = -1
rf.becomeFollowerWithLock()
}
rf.mu.Unlock()
}
func (rf *Raft) VotedFor() int {
voted := -1
rf.mu.Lock()
voted = rf.votedFor
rf.mu.Unlock()
return voted
}
func (rf *Raft) setVotedFor(index int) {
rf.mu.Lock()
rf.votedFor = index
rf.mu.Unlock()
}
func (rf *Raft) GetLastLogEntryWithLock() LogEntry {
entry := LogEntry{}
if len(rf.Logs) == 0 {
entry.Term = rf.currentTerm
entry.Index = 0
} else {
entry = rf.Logs[len(rf.Logs)-1]
}
return entry
}
func (rf *Raft) GetLastLogEntry() LogEntry {
entry := LogEntry{}
rf.mu.Lock()
entry = rf.GetLastLogEntryWithLock()
rf.mu.Unlock()
return entry
}
func (rf *Raft) RequestVote(args *RequestVoteArgs, reply *RequestVoteReply) {
rf.updateTerm(args.Term)
rf.mu.Lock()
reply.Term = rf.currentTerm
if args.Term < rf.currentTerm {
reply.VoteGranted = false
} else if rf.votedFor == -1 || rf.votedFor == args.CandidateId {
reply.VoteGranted = true
rf.votedFor = args.CandidateId
rf.lastHeartBeatRecieved = time.Now()
}
rf.mu.Unlock()
}
func (rf *Raft)
|
(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {
ok := rf.peers[server].Call("Raft.RequestVote", args, reply)
return ok
}
func (rf *Raft) handleRequestVoteResponse(request RequestVoteArgs, reply RequestVoteReply) bool {
rf.updateTerm(reply.Term)
rf.mu.Lock()
if rf.currentTerm != request.Term {
rf.mu.Unlock()
return false
}
granted := reply.VoteGranted
rf.mu.Unlock()
return granted
}
func (rf *Raft) setLastHeartBeatRecieved(recievedTime time.Time) {
rf.mu.Lock()
rf.lastHeartBeatRecieved = recievedTime
rf.mu.Unlock()
}
func (rf *Raft) AppendEntries(args *AppendEntriesRequest, reply *AppendEntriesResponse) {
term := args.Term
rf.updateTerm(term)
rf.mu.Lock()
if term == rf.currentTerm {
if rf.role == CANDIDATE {
rf.becomeFollowerWithLock()
}
rf.leaderId = args.Src
rf.lastHeartBeatRecieved = time.Now()
rf.currentTerm = term
}
rf.mu.Unlock()
}
func (rf *Raft) sendAppendEntries(server int, args *AppendEntriesRequest, reply *AppendEntriesResponse) bool {
ok := rf.peers[server].Call("Raft.AppendEntries", args, reply)
return ok
}
func (rf *Raft) handleAppendEntriesResponse(request AppendEntriesRequest, reply
|
sendRequestVote
|
identifier_name
|
waxxedit.js
|
//h1:"h1",
//h2:"h2",
//h3:"h3",
|
//upload:"upload"
},
configuration:function(editor_id,textareaId,width,height,color){
wedit.configs.editor_id = editor_id || this.configs.editor_id;
wedit.configs.textareaId = textareaId || this.configs.textareaId;
wedit.configs.width = width || this.configs.width;
wedit.configs.height = height || this.configs.height;
wedit.configs.border = this.configs.border;
wedit.configs.backgroundColor = color || this.configs.backgroundColor;
},
//create the editor
init:function(editor_id,textareaId,width,height,color){
this.configuration(editor_id,textareaId,width,height,color);
this.container();
this.toolbar();
this.content();
wedit.uMethods.winResize();
},
//draw the editor container
container:function(){
wedit.contentObj = this.uMethods.getEleById(wedit.configs.editor_id);
wedit.contentObj.style.position = wedit.configs.position;
wedit.contentObj.style.width = wedit.configs.width;
wedit.contentObj.style.height = wedit.configs.height;
wedit.contentObj.style.border = wedit.configs.border;
wedit.contentObj.style.backgroundColor = wedit.configs.backgroundColor;
},
//editor toolbar
toolbar:function(){
wedit.toolbarObj = this.uMethods.createEle("div");
wedit.contentObj.appendChild(this.toolClass.setTooBgColor(wedit.toolbarObj,wedit.configs.toolbarW,wedit.configs.toolbarH,wedit.configs.toobarBorder,wedit.configs.toobarBgColor));
//create the UL element
var ulObj = this.uMethods.createEle("ul","ul");
ulObj.className = wedit.configs.ulClassName;
//load the toolbar menu buttons
this.toolMethods.loadToolBarMenu(ulObj);
wedit.toolbarObj.appendChild(ulObj);
//adjust the toolbar styles
//bind the toolbar events
wedit.toolMethods.createCodeFramework();
wedit.toolMethods.getSelectContextChange();
},
//editor toolbar helper methods
toolClass:{
setTooBgColor:function(obj,w,h,b,Color){
obj.style.width = w;
obj.style.height = h;
obj.style.border = b;
obj.style.backgroundColor = Color;
return obj
},
setToolBarMenuStyle:function(ulObj){
ulObj.style.cssText="list-style-type:none;margin:0px;padding:0px;";
},
setCodeFrameworkStyle:function(obj){
obj.style.cssText ="position:fixed;top:20%;left:30%;background:#fff;border:1px solid #ccc;border-radius:12px;width:650px;height:350px;z-Index:991;";
obj.childNodes[0].style.cssText="height:35px;line-height:35px;font-size:14px;background:#f5f5f5;padding-left:15px;font-weight:bold;";
obj.childNodes[0].innerHTML ="<span>插入代码块</span>";
obj.childNodes[1].style.cssText ="position:absolute;right:10px;top:10px;border:1px solid #333;width:20px;height:20px;line-height:20px;text-align:center;font-size:16px;cursor:pointer;"
obj.childNodes[2].style.cssText="width:98%;height:240px;text-align:left;";
obj.childNodes[3].style.cssText="width:98%;height:60px;line-height:60px;text-align:right;border:0px solid #999;";
obj.childNodes[3].childNodes[0].value = "插入代码";
obj.childNodes[3].childNodes[0].style.cssText ="width:120px;height:35px;line-height:35px;background:#4f5b93;color:#fff;border:0px;border-radius:12px;";
}
},
toolMethods:{
loadToolBarMenu:function(ulObj){
wedit.toolClass.setToolBarMenuStyle(ulObj)
for(var i in wedit.tbarMenu){
let liObj = wedit.uMethods.createEle("li");
liObj.innerText = i;
liObj.setAttribute("id",i);
liObj.style.cssText="display:inline-block;float:left;font-size:12px;width:45px;height:50px;line-height:50px;border:1px solid #ccc;text-align:center;cursor:pointer";
ulObj.appendChild(liObj);
}
},
setTextareaStyle:function(obj){
obj.style.width = wedit.configs.textareaW;
obj.style.height = wedit.configs.textareaH;
obj.style.wordWrap ="break-word";
obj.style.padding ="10px";
obj.style.overflow ="scroll";
},
createTbarButton:function(ele,type){
return wedit.uMethods.createEle(ele,type)
},
insetIntoCode:function(id,tid,sid,dvid){
var insertObj = wedit.uMethods.getEleById(id);
insertObj.addEventListener("click",function(){
wedit.textareaObj = wedit.uMethods.getEleById(wedit.configs.textareaId);
wedit.textareaObj.innerHTML += "\r\n<pre><code>"+ wedit.toolMethods.textareaStrChageCode(wedit.uMethods.getEleById(tid).value)+"</code></pre>\r\n";
//close the insert-code dialog
wedit.toolMethods.closeCodeDailog(sid,dvid);
//close the popup
wedit.uMethods.getEleById(dvid).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},false);
},
closeCodeDailog:function(sid,dvid){
wedit.uMethods.getEleById(sid).addEventListener('click',function(e){
wedit.uMethods.getEleById(dvid).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},false);
},
//close the popup directly and remove it.
closeDailog:function(){
wedit.uMethods.getEleById(wedit.configs.codeFrameId).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},
//create the CODE block dialog
createCodeFramework:function(){
wedit.uMethods.getEleById("code").onclick=function(){
//load the overlay layer
wedit.uMethods.createShadeDv();
//create the popup: title, content, button, DIV
var dvObj = wedit.uMethods.createEle("div");
wedit.uMethods.setElementAttr(dvObj,"id","codeFrame");
//var codeObj = wedit.uMethods.getEleById(wedit.configs.editor_id);
document.body.appendChild(dvObj);
var dvObjTitle = wedit.uMethods.createEle("div");
wedit.uMethods.setElementAttr(dvObjTitle,"class","codeTitle");
dvObj.appendChild(dvObjTitle);
var dvObjSpan = wedit.uMethods.createEle("span");
dvObjSpan.innerText = "X";
wedit.uMethods.setElementAttr(dvObjSpan,"id","codeCloseSpan");
dvObj.appendChild(dvObjSpan);
var dvObjContent = wedit.uMethods.createEle("textarea");
wedit.uMethods.setElementAttr(dvObjContent,"id","codeContent");
dvObj.appendChild(dvObjContent);
var dvObjFooter = wedit.uMethods.createEle("div");
dvObjFooter.className = 'codeFooter';
dvObj.appendChild(dvObjFooter);
var dvObjInput= wedit.uMethods.createEle("input");
dvObjInput.type="button";
wedit.uMethods.setElementAttr(dvObjInput,"id","codeButton");
dvObjFooter.appendChild(dvObjInput);
wedit.toolClass.setCodeFrameworkStyle(dvObj);
//bind the popup close event
wedit.toolMethods.closeCodeDailog(wedit.configs.codeCloseSpanId,wedit.configs.codeFrameId);
wedit.toolMethods.insetIntoCode("codeButton","codeContent","codeCloseSpan","codeFrame");
wedit.uMethods.autoLoadWinWidth();
};
},
//escape the code string;
textareaStrChageCode:function(str){
let st = str.replace(/</g,"&lt;");
st = st.replace(/>/g,"&gt;");
return st;
},
getSelectContextChange:function(obj){
for(var j in wedit.tbarMenu){
this.getSelConChange(j);
}
},
getSelConChange:function(j){
wedit.uMethods.getEleById(j).addEventListener("click",function(){
//wedit.uMethods.getEleById("")
},false);
},
},
//editor content area
content:function(){
wedit.conTextObj = this.uMethods.createEle("div");
wedit.contentObj.appendChild(this.toolClass.setTooBg
|
//h4:"h4",
|
random_line_split
|
waxxedit.js
|
s.toolbarH,wedit.configs.toobarBorder,wedit.configs.toobarBgColor));
//create the UL element
var ulObj = this.uMethods.createEle("ul","ul");
ulObj.className = wedit.configs.ulClassName;
//load the toolbar menu buttons
this.toolMethods.loadToolBarMenu(ulObj);
wedit.toolbarObj.appendChild(ulObj);
//adjust the toolbar styles
//bind the toolbar events
wedit.toolMethods.createCodeFramework();
wedit.toolMethods.getSelectContextChange();
},
//editor toolbar helper methods
toolClass:{
setTooBgColor:function(obj,w,h,b,Color){
obj.style.width = w;
obj.style.height = h;
obj.style.border = b;
obj.style.backgroundColor = Color;
return obj
},
setToolBarMenuStyle:function(ulObj){
ulObj.style.cssText="list-style-type:none;margin:0px;padding:0px;";
},
setCodeFrameworkStyle:function(obj){
obj.style.cssText ="position:fixed;top:20%;left:30%;background:#fff;border:1px solid #ccc;border-radius:12px;width:650px;height:350px;z-Index:991;";
obj.childNodes[0].style.cssText="height:35px;line-height:35px;font-size:14px;background:#f5f5f5;padding-left:15px;font-weight:bold;";
obj.childNodes[0].innerHTML ="<span>插入代码块</span>";
obj.childNodes[1].style.cssText ="position:absolute;right:10px;top:10px;border:1px solid #333;width:20px;height:20px;line-height:20px;text-align:center;font-size:16px;cursor:pointer;"
obj.childNodes[2].style.cssText="width:98%;height:240px;text-align:left;";
obj.childNodes[3].style.cssText="width:98%;height:60px;line-height:60px;text-align:right;border:0px solid #999;";
obj.childNodes[3].childNodes[0].value = "插入代码";
obj.childNodes[3].childNodes[0].style.cssText ="width:120px;height:35px;line-height:35px;background:#4f5b93;color:#fff;border:0px;border-radius:12px;";
}
},
toolMethods:{
loadToolBarMenu:function(ulObj){
wedit.toolClass.setToolBarMenuStyle(ulObj)
for(var i in wedit.tbarMenu){
let liObj = wedit.uMethods.createEle("li");
liObj.innerText = i;
liObj.setAttribute("id",i);
liObj.style.cssText="display:inline-block;float:left;font-size:12px;width:45px;height:50px;line-height:50px;border:1px solid #ccc;text-align:center;cursor:pointer";
ulObj.appendChild(liObj);
}
},
setTextareaStyle:function(obj){
obj.style.width = wedit.configs.textareaW;
obj.style.height = wedit.configs.textareaH;
obj.style.wordWrap ="break-word";
obj.style.padding ="10px";
obj.style.overflow ="scroll";
},
createTbarButton:function(ele,type){
return wedit.uMethods.createEle(ele,type)
},
insetIntoCode:function(id,tid,sid,dvid){
var insertObj = wedit.uMethods.getEleById(id);
insertObj.addEventListener("click",function(){
wedit.textareaObj = wedit.uMethods.getEleById(wedit.configs.textareaId);
wedit.textareaObj.innerHTML += "\r\n<pre><code>"+ wedit.toolMethods.textareaStrChageCode(wedit.uMethods.getEleById(tid).value)+"</code></pre>\r\n";
//close the insert-code dialog
wedit.toolMethods.closeCodeDailog(sid,dvid);
//close the popup
wedit.uMethods.getEleById(dvid).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},false);
},
closeCodeDailog:function(sid,dvid){
wedit.uMethods.getEleById(sid).addEventListener('click',function(e){
wedit.uMethods.getEleById(dvid).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},false);
},
//close the popup directly and remove it.
closeDailog:function(){
wedit.uMethods.getEleById(wedit.configs.codeFrameId).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},
//create the CODE block dialog
createCodeFramework:function(){
wedit.uMethods.getEleById("code").onclick=function(){
//load the overlay layer
wedit.uMethods.createShadeDv();
//create the popup: title, content, button, DIV
var dvObj = wedit.uMethods.createEle("div");
wedit.uMethods.setElementAttr(dvObj,"id","codeFrame");
//var codeObj = wedit.uMethods.getEleById(wedit.configs.editor_id);
document.body.appendChild(dvObj);
var dvObjTitle = wedit.uMethods.createEle("div");
wedit.uMethods.setElementAttr(dvObjTitle,"class","codeTitle");
dvObj.appendChild(dvObjTitle);
var dvObjSpan = wedit.uMethods.createEle("span");
dvObjSpan.innerText = "X";
wedit.uMethods.setElementAttr(dvObjSpan,"id","codeCloseSpan");
dvObj.appendChild(dvObjSpan);
var dvObjContent = wedit.uMethods.createEle("textarea");
wedit.uMethods.setElementAttr(dvObjContent,"id","codeContent");
dvObj.appendChild(dvObjContent);
var dvObjFooter = wedit.uMethods.createEle("div");
dvObjFooter.className = 'codeFooter';
dvObj.appendChild(dvObjFooter);
var dvObjInput= wedit.uMethods.createEle("input");
dvObjInput.type="button";
wedit.uMethods.setElementAttr(dvObjInput,"id","codeButton");
dvObjFooter.appendChild(dvObjInput);
wedit.toolClass.setCodeFrameworkStyle(dvObj);
//bind the popup close event
wedit.toolMethods.closeCodeDailog(wedit.configs.codeCloseSpanId,wedit.configs.codeFrameId);
wedit.toolMethods.insetIntoCode("codeButton","codeContent","codeCloseSpan","codeFrame");
wedit.uMethods.autoLoadWinWidth();
};
},
//escape the code string;
textareaStrChageCode:function(str){
let st = str.replace(/</g,"&lt;");
st = st.replace(/>/g,"&gt;");
return st;
},
getSelectContextChange:function(obj){
for(var j in wedit.tbarMenu){
this.getSelConChange(j);
}
},
getSelConChange:function(j){
wedit.uMethods.getEleById(j).addEventListener("click",function(){
//wedit.uMethods.getEleById("")
},false);
},
},
//editor content area
content:function(){
wedit.conTextObj = this.uMethods.createEle("div");
wedit.contentObj.appendChild(this.toolClass.setTooBgColor(wedit.conTextObj,wedit.configs.conTextW,wedit.configs.conTextH,wedit.configs.conTextBorder,wedit.configs.conTextBgCOlor));
//wedit.textareaObj = this.uMethods.createEle("textarea",'textarea');
wedit.textareaObj = this.uMethods.createEle("div");
wedit.textareaObj.setAttribute("id",wedit.configs.textareaId);
wedit.textareaObj.setAttribute("name","content");
wedit.textareaObj.setAttribute("contenteditable","true");
this.toolMethods.setTextareaStyle(wedit.textareaObj);
wedit.conTextObj.appendChild(wedit.textareaObj);
wedit.contentMethods.textareaOnSelectEvent(wedit.configs.textareaId);
},
//operations on the editor content
contentClass:{
},
contentMethods:{
//fires when editor content is selected;
textareaOnSelectEvent:function(tid){
wedit.uMethods.getEleById(tid).addEventListener("select",function(e){
e.originalTarget.value;
},false);
}
},
//common utility methods
uMethods:{
getEleById:function(id){
return document.getElementById(id);
},
getEleByTagName(tag){
return document.getElementsByClassName(tag);
},
createEle:function(ele,type){
var eLe = document.createElement(ele);
if(type == 'div'){
eLe.className = wedit.configs.toolbarClassName;
}else if(type == 'textarea'){
eLe.className = wedit.configs.textareaClassName;
}else if(type == 'h1'){
|
eLe.className = wedit.tbarMenu.h1;
}else if(type
|
identifier_body
|
|
waxxedit.js
|
Class.setTooBgColor(wedit.toolbarObj,wedit.configs.toolbarW,wedit.configs.toolbarH,wedit.configs.toobarBorder,wedit.configs.toobarBgColor));
//create the UL element
var ulObj = this.uMethods.createEle("ul","ul");
ulObj.className = wedit.configs.ulClassName;
//load the toolbar menu buttons
this.toolMethods.loadToolBarMenu(ulObj);
wedit.toolbarObj.appendChild(ulObj);
//adjust the toolbar styles
//bind the toolbar events
wedit.toolMethods.createCodeFramework();
wedit.toolMethods.getSelectContextChange();
},
//editor toolbar helper methods
toolClass:{
setTooBgColor:function(obj,w,h,b,Color){
obj.style.width = w;
obj.style.height = h;
obj.style.border = b;
obj.style.backgroundColor = Color;
return obj
},
setToolBarMenuStyle:function(ulObj){
ulObj.style.cssText="list-style-type:none;margin:0px;padding:0px;";
},
setCodeFrameworkStyle:function(obj){
obj.style.cssText ="position:fixed;top:20%;left:30%;background:#fff;border:1px solid #ccc;border-radius:12px;width:650px;height:350px;z-Index:991;";
obj.childNodes[0].style.cssText="height:35px;line-height:35px;font-size:14px;background:#f5f5f5;padding-left:15px;font-weight:bold;";
obj.childNodes[0].innerHTML ="<span>插入代码块</span>";
obj.childNodes[1].style.cssText ="position:absolute;right:10px;top:10px;border:1px solid #333;width:20px;height:20px;line-height:20px;text-align:center;font-size:16px;cursor:pointer;"
obj.childNodes[2].style.cssText="width:98%;height:240px;text-align:left;";
obj.childNodes[3].style.cssText="width:98%;height:60px;line-height:60px;text-align:right;border:0px solid #999;";
obj.childNodes[3].childNodes[0].value = "插入代码";
obj.childNodes[3].childNodes[0].style.cssText ="width:120px;height:35px;line-height:35px;background:#4f5b93;color:#fff;border:0px;border-radius:12px;";
}
},
toolMethods:{
loadToolBarMenu:function(ulObj){
wedit.toolClass.setToolBarMenuStyle(ulObj)
for(var i in wedit.tbarMenu){
let liObj = wedit.uMethods.createEle("li");
liObj.innerText = i;
liObj.setAttribute("id",i);
liObj.style.cssText="display:inline-block;float:left;font-size:12px;width:45px;height:50px;line-height:50px;border:1px solid #ccc;text-align:center;cursor:pointer";
ulObj.appendChild(liObj);
}
},
setTextareaStyle:function(obj){
obj.style.width = wedit.configs.textareaW;
obj.style.height = wedit.configs.textareaH;
obj.style.wordWrap ="break-word";
obj.style.padding ="10px";
obj.style.overflow ="scroll";
},
createTbarButton:function(ele,type){
return wedit.uMethods.createEle(ele,type)
},
insetIntoCode:function(id,tid,sid,dvid){
var insertObj = wedit.uMethods.getEleById(id);
insertObj.addEventListener("click",function(){
wedit.textareaObj = wedit.uMethods.getEleById(wedit.configs.textareaId);
wedit.textareaObj.innerHTML += "\r\n<pre><code>"+ wedit.toolMethods.textareaStrChageCode(wedit.uMethods.getEleById(tid).value)+"</code></pre>\r\n";
//close the insert-code dialog
wedit.toolMethods.closeCodeDailog(sid,dvid);
//close the popup
wedit.uMethods.getEleById(dvid).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},false);
},
closeCodeDailog:function(sid,dvid){
wedit.uMethods.getEleById(sid).addEventListener('click',function(e){
wedit.uMethods.getEleById(dvid).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},false);
},
//close the popup directly and remove it.
closeDailog:function(){
wedit.uMethods.getEleById(wedit.configs.codeFrameId).remove();
wedit.uMethods.getEleById(wedit.configs.shadeDvId).remove();
},
//create the CODE block dialog
createCodeFramework:function(){
wedit.uMethods.getEleById("code").onclick=function(){
// Load the overlay layer
wedit.uMethods.createShadeDv();
// Create the popup: title, content, button, DIV
var dvObj = wedit.uMethods.createEle("div");
wedit.uMethods.setElementAttr(dvObj,"id","codeFrame");
//var codeObj = wedit.uMethods.getEleById(wedit.configs.editor_id);
document.body.appendChild(dvObj);
var dvObjTitle = wedit.uMethods.createEle("div");
wedit.uMethods.setElementAttr(dvObjTitle,"class","codeTitle");
dvObj.appendChild(dvObjTitle);
var dvObjSpan = wedit.uMethods.createEle("span");
dvObjSpan.innerText = "X";
wedit.uMethods.setElementAttr(dvObjSpan,"id","codeCloseSpan");
dvObj.appendChild(dvObjSpan);
var dvObjContent = wedit.uMethods.createEle("textarea");
wedit.uMethods.setElementAttr(dvObjContent,"id","codeContent");
dvObj.appendChild(dvObjContent);
var dvObjFooter = wedit.uMethods.createEle("div");
dvObjFooter.className = 'codeFooter';
dvObj.appendChild(dvObjFooter);
var dvObjInput= wedit.uMethods.createEle("input");
dvObjInput.type="button";
wedit.uMethods.setElementAttr(dvObjInput,"id","codeButton");
dvObjFooter.appendChild(dvObjInput);
wedit.toolClass.setCodeFrameworkStyle(dvObj);
// Bind the popup close event
wedit.toolMethods.closeCodeDailog(wedit.configs.codeCloseSpanId,wedit.configs.codeFrameId);
wedit.toolMethods.insetIntoCode("codeButton","codeContent","codeCloseSpan","codeFrame");
wedit.uMethods.autoLoadWinWidth();
};
},
// Escape the code string before insertion
textareaStrChageCode:function(str){
let st = str.replace(/</g,"&lt;");
st = st.replace(/>/g,"&gt;");
return st;
},
getSelectContextChange:function(obj){
for(var j in wedit.tbarMenu){
this.getSelConChange(j);
}
},
getSelConChange:function(j){
wedit.uMethods.getEleById(j).addEventListener("click",function(){
//wedit.uMethods.getEleById("")
},false);
},
},
// Editor content area
content:function(){
wedit.conTextObj = this.uMethods.createEle("div");
wedit.contentObj.appendChild(this.toolClass.setTooBgColor(wedit.conTextObj,wedit.configs.conTextW,wedit.configs.conTextH,wedit.configs.conTextBorder,wedit.configs.conTextBgCOlor));
//wedit.textareaObj = this.uMethods.createEle("textarea",'textarea');
wedit.textareaObj = this.uMethods.createEle("div");
wedit.textareaObj.setAttribute("id",wedit.configs.textareaId);
wedit.textareaObj.setAttribute("name","content");
wedit.textareaObj.setAttribute("contenteditable","true");
this.toolMethods.setTextareaStyle(wedit.textareaObj);
wedit.conTextObj.appendChild(wedit.textareaObj);
wedit.contentMethods.textareaOnSelectEvent(wedit.configs.textareaId);
},
// Operations on the editor content
contentClass:{
},
contentMethods:{
// Handle the content selection event
textareaOnSelectEvent:function(tid){
wedit.uMethods.getEleById(tid).addEventListener("select",function(e){
e.originalTarget.value;
},false);
}
},
// Common utility methods
uMethods:{
getEleById:function(id){
return document.getElementById(id);
},
getEleByTagName(tag){
return document.getElementsByClassName(tag);
},
createEle:function(ele,type){
var eLe = document.createElement(ele);
if(type == 'div'){
eLe.className = wedit.configs.toolbarClassName;
}else if(type == 'textarea'){
eLe.className = wedit.configs.textareaClassName;
}else
|
if(type == 'h1'
|
identifier_name
|
|
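The waxxedit.js rows above escape < and > in textareaStrChageCode before the insert-code handler appends the user's code to the editor via innerHTML. A minimal standalone sketch of that step, assuming only browser DOM APIs and a hypothetical element id "editor"; the ampersand escape is an addition the original omits:
// Sketch: escape markup characters before injecting user-typed code with innerHTML.
function escapeCode(str) {
  return str
    .replace(/&/g, "&amp;")  // not in the original; escaped first to avoid double-escaping
    .replace(/</g, "&lt;")
    .replace(/>/g, "&gt;");
}
function insertCodeBlock(rawCode) {
  var editor = document.getElementById("editor"); // hypothetical id
  editor.innerHTML += "\r\n<pre><code>" + escapeCode(rawCode) + "</code></pre>\r\n";
}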
index.funcs.js
|
(num2Scroll, dir2Scroll) {
carousel.carouFredSel({
align : "center",
width : "100%",
onWindowResize : 'throttle',
items : Math.round(window.innerWidth/200),
scroll : window.num2Scroll,
direction : window.dir2Scroll,
swipe : {
onTouch : true
},
prev : {
button : c2
},
next : {
button : c5
}
}, {
debug : false // !!! production - set to false
});
}
//!!!Fire carousel on resize - is there a method for this (check API)?
$(window).resize(function(dir2Scroll) {
if(list_img.css("display") !== "none") {
setCarousel(window.num2Scroll, window.dir2Scroll); //!!!don't reset
}
});
content_frame.css("display","none");
/*setup links*/
list_img.css("display","none"); //hides images embedded in links
//nav menu clicks - text
menu_text.on('click', function(e) {
e.preventDefault();
if(!ctrls.hasClass("transparent")) {
list_img.hide(); //hides images embedded in links
carousel.trigger("destroy", "origOrder").removeAttr("style");
list.removeClass("list-carousel").addClass("list-text");
//add 3 col lis back in
block1.wrapAll('<ul id="t1" class="reset"></ul>');
block2.wrapAll('<ul id="t2"></ul>');
block3.wrapAll('<ul id="t3"></ul>');
ctrls.addClass("transparent");
menu_text.addClass("current hide").parent().addClass("current");
menu_graphics.removeClass("current hide").parent().removeClass("current");
carousel_help.fadeOut("fast");
history.pushState('text','Home',url_no_params+'?state=text-list');
}
});
//nav menu clicks - graphics
menu_graphics.on('click', function(e, num2Scroll, dir2Scroll) {
e.preventDefault();
if(list_img.css("display") !== "inline-table") {
list_img.css("display","inline-table").removeClass('hidden'); //displays hidden images embedded in links
list.removeClass("list-text").addClass("list-carousel");
carousel_li.unwrap();
setCarousel(window.num2Scroll, window.dir2Scroll);
ctrls.removeClass("transparent").center({vertical: false});
menu_graphics.addClass("current hide").parent().addClass("current");
menu_text.removeClass("current hide").parent().removeClass("current");
carousel_help.fadeIn(2000);
history.pushState('graphics','Home',url_no_params+'?state=img-list');
}
});
/*reset button click*/
reset.on('click', function(e) {
e.preventDefault();
content_frame.fadeOut("slow", function() {
content_frame.empty();
land_aside.fadeIn("slow");
legend.fadeIn("slow");
});
reset.addClass("current").parent().addClass("current"); //sets the parent li - otherwise hover color bleeds thru padding
});
/*note flipper */
note_flip.on('click', function(e) {
e.preventDefault();
var next = 0;
for(i=1;i<5;i++){
if(eval($("#note"+i)).hasClass('show')){
eval($("#note"+i)).removeClass().addClass("hide");
if(i===4){
next = 1;
$("#todo").text("To do:");
} else {
next = i+1;
$("#todo").text("Past to dos:").addClass("note-font");
}
}
}
eval($("#note"+next)).removeClass().addClass("show");
});
/*carousel controls*/
c1.on('click', function(e, num2Scroll, dir2Scroll) { //slow down num2Scroll
e.preventDefault();
if(window.num2Scroll > 1) {
carousel.trigger("configuration", ["scroll", window.num2Scroll-=1], "play");
}
});
c2.on('click', function(e, num2Scroll, dir2Scroll) { //scroll backward
e.preventDefault();
carousel.trigger("configuration", ["direction", "right"], "play");
});
c3.on('click', function(e, num2Scroll, dir2Scroll) { //pause scroll
e.preventDefault();
carousel.trigger("pause", true);
});
c4.on('click', function(e, num2Scroll, dir2Scroll) { //start scroll
e.preventDefault();
if(!carousel.triggerHandler("isScrolling")) {
carousel.trigger("play", [window.dir2Scroll, true]);
} else {
carousel.trigger("resume");
}
});
c5.on('click', function(e, num2Scroll, dir2Scroll) { //scroll forward
e.preventDefault();
carousel.trigger("configuration", ["direction", "left"], "play");
});
c6.on('click', function(e, num2Scroll, dir2Scroll) { //speed up scroll
e.preventDefault();
var numVisible = carousel.triggerHandler("configuration", "items.visible");
if(window.num2Scroll < numVisible) {
carousel.trigger("configuration", ["scroll", window.num2Scroll+=1], "play");
}
});
/*!!!feed click handler - I'm sure there's a better way*/
feed_btn.on('click', function(e) {
var id = this.id,
html = '<h2 class="to-center">Latest ' +id.substr(0,1).toUpperCase()+id.substr(1)+ ' Updates</h2><ul class="nolist">',
http = '',
obj = '',
date = '',
show = '',
tmp = '',
limit = 5,
closer = "</ul>";
success = false;
show = "content_frame.css('display','inline').removeClass('image-matrix')";
switch (id) {
case ('blogger'):
http = 'https://www.googleapis.com/blogger/v3/blogs/2575251403540723939/posts?key=AIzaSyC4Zhv-nd_98_9Vn8Ad3U6TjY99Pd2YzOQ';
obj = 'data.items';
tmp = "'<li><time datetime=\"' + item.updated + '\">' + item.updated.substr(0,10) + '</time>: <a href=\"' + item.url + '\" target=\"_blank\">' + item.title + '</a></li>'";
limit = 5;
break;
case ('twitter'):
http = 'http://search.twitter.com/search.json?q=jahdakine&callback=?';
obj = 'data.results';
tmp = "'<li><img src=\"' +item.profile_image_url+ '\" height=\"25\" width=\"25\" alt=\"profile icon\"/> <time datetime=\"' +item.created_at.split(' ').slice(0, 4).join(' ')+ '\">' +item.created_at.split(' ').slice(0, 4).join(' ')+ '</time>: <a href=\"http://twitter.com/jahdakine/status/' +item.id_str+ '\" target=\"_blank\">' +item.text+ '</a></li>'";
limit = 10;
break;
case ('flickr'):
html = "<div id='flickr-container' class='boxWrapper'>";
closer = "</div><div class=\"clear-fix\">";
http = 'http://api.flickr.com/services/feeds/photos_public.gne?id=23019891@N00&lang=en-us&format=json&jsoncallback=?';
obj = 'data.items';
tmp = "'<div class=\"boxOuter\"><a href=\"' + item.link + '\" target=\"_blank\" class=\"flickr-img boxInner\" title=\"Open Flickr page titled "' + item.title + '" in a new window/tab\"><img src=\"' + item.media.m + '\" /></a></div>'";
show = "content_frame.css('display','block').addClass('image-matrix')";
limit = 20;
break;
case ('meetup'): //venue=1139097 member=65732862 group=1769691 group_urlname=HTML5-Denver-Users-Group
http = 'http://api.meetup.com/activity?_=1361290215235&member_id=65732862&format=json&sig_id=65732862&sig=7be5cdcf1093d70515959c1b785e75c67f9c642f';
obj = 'data.results';
tmp = "'<li>' +item.updated+ ': <a href=\"' +item.link+ '\" title=\"Open' +item.title+ ' in a new window\" target=\"_blank\">' +item.title+ '</a><li>'";
limit = 3;
break;
|
setCarousel
|
identifier_name
|
|
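The feed click handler above configures each service with three strings: a JSONP URL, an eval'd path to the item array, and an eval'd per-item HTML template that getFeed later evaluates inside $.each. A hedged sketch of the same fetch-and-render loop, assuming jQuery is loaded and deliberately swapping the eval'd strings for a plain render function (a variation, not what the file does); the URL placeholders stand in for the real ids and keys:
// Sketch only: one feed entry, with the template expressed as a function instead of an eval'd string.
var feeds = {
  blogger: {
    url: "https://www.googleapis.com/blogger/v3/blogs/<BLOG_ID>/posts?key=<API_KEY>", // placeholders
    items: function (data) { return data.items || []; },
    render: function (item) {
      return '<li><time datetime="' + item.updated + '">' + item.updated.substr(0, 10) +
             '</time>: <a href="' + item.url + '" target="_blank">' + item.title + '</a></li>';
    },
    limit: 5
  }
};
function loadFeed(id, onHtml) {
  var feed = feeds[id];
  $.ajax({
    dataType: "jsonp",
    jsonp: "callback",
    url: feed.url,
    success: function (data) {
      var html = "<ul>";
      $.each(feed.items(data).slice(0, feed.limit), function (i, item) {
        html += feed.render(item);
      });
      onHtml(html + "</ul>");
    }
  });
}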
index.funcs.js
|
tmp = "'<li><time datetime=\"' + item.updated + '\">' + item.updated.substr(0,10) + '</time>: <a href=\"' + item.url + '\" target=\"_blank\">' + item.title + '</a></li>'";
limit = 5;
break;
case ('twitter'):
http = 'http://search.twitter.com/search.json?q=jahdakine&callback=?';
obj = 'data.results';
tmp = "'<li><img src=\"' +item.profile_image_url+ '\" height=\"25\" width=\"25\" alt=\"profile icon\"/> <time datetime=\"' +item.created_at.split(' ').slice(0, 4).join(' ')+ '\">' +item.created_at.split(' ').slice(0, 4).join(' ')+ '</time>: <a href=\"http://twitter.com/jahdakine/status/' +item.id_str+ '\" target=\"_blank\">' +item.text+ '</a></li>'";
limit = 10;
break;
case ('flickr'):
html = "<div id='flickr-container' class='boxWrapper'>";
closer = "</div><div class=\"clear-fix\">";
http = 'http://api.flickr.com/services/feeds/photos_public.gne?id=23019891@N00&lang=en-us&format=json&jsoncallback=?';
obj = 'data.items';
tmp = "'<div class=\"boxOuter\"><a href=\"' + item.link + '\" target=\"_blank\" class=\"flickr-img boxInner\" title=\"Open Flickr page titled "' + item.title + '" in a new window/tab\"><img src=\"' + item.media.m + '\" /></a></div>'";
show = "content_frame.css('display','block').addClass('image-matrix')";
limit = 20;
break;
case ('meetup'): //venue=1139097 member=65732862 group=1769691 group_urlname=HTML5-Denver-Users-Group
http = 'http://api.meetup.com/activity?_=1361290215235&member_id=65732862&format=json&sig_id=65732862&sig=7be5cdcf1093d70515959c1b785e75c67f9c642f';
obj = 'data.results';
tmp = "'<li>' +item.updated+ ': <a href=\"' +item.link+ '\" title=\"Open' +item.title+ ' in a new window\" target=\"_blank\">' +item.title+ '</a><li>'";
limit = 3;
break;
case ('github'):
http = "https://api.github.com/repos/jahdakine/jahdakine/commits?callback=?";
obj = 'data.data';
tmp = "'<li>' +item.commit.author.date.substr(0,10)+ ' ' +item.commit.author.date.substr(11,8)+ ': <a href=\"' +item.html_url+ '\" title=\"Open Github commit log in a new window\" target=\"_blank\">' +item.commit.message+ '</a></li>'";
limit = 5;
break;
case ('youtube'):
http="https://gdata.youtube.com/feeds/api/users/jahdakine/uploads?v=2&alt=json";
obj = "data.feed.entry";
tmp = "'<li><time datetime=\"' + item.updated.$t + '\">' +item.updated.$t.substr(0,10) + '</time>: <a href=\"' +item.link[0].href+ '\" title=\"Open' +item.title.$t+ 'in a new window\" target=\"_blank\">' +item.title.$t+ '</a></li>'";
break;
case ('coderbits'):
http="https://coderbits.com/jahdakine.json";
obj="data.badges";
tmp = "'<li>' + item + '</li>'";
break;
case ('google'):
http = 'https://www.googleapis.com/plus/v1/people/114704033710627861845/activities/public?key=AIzaSyC7qL3rj2BltH6GV6WOjovK3zuuS5sy024';
obj = 'data.items';
tmp = "'<li><img src=\"' +item.actor.image.url+ '\" alt=\"\" height=\"25\" width=\"25\"/> <time datetime=\"' + item.updated + '\">' + item.updated.substr(0,10) + '</time>: <a href=\"' + item.url + '\" target=\"_blank\">' + item.object.attachments[0].content.substr(0,50) + '...</a></li>'";
limit = 5;
break;
case ('zazzle'):
tmp = ["<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117573488824205121\" FlashVars=\"feedId=117573488824205121\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>",
"<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117453752667062082\" FlashVars=\"feedId=117453752667062082\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>",
"<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117631920418883930\" FlashVars=\"feedId=117631920418883930\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>"];
break;
case ('stackoverflow'):
http="http://api.stackoverflow.com/1.0/users/1997909/?jsonp=?";
obj = 'data.users';
tmp = "'<li><strong>Reputation</strong>:<br>' +item.reputation+ '<br><strong>Badges</strong>:<br> Gold-' +item.badge_counts.gold+ ' <br>Silver-' +item.badge_counts.silver+ ' <br>Bronze-' +item.badge_counts.bronze+ '</li>'";
break;
case ('grooveshark'):
http="false";
break;
case ('yelp'):
http="false";
break;
case ('ebay'):
http="false";
break;
case ('icloud'):
http="false";
break;
case ('vimeo'):
http="false";
break;
case ('netflix'):
http="false";
break;
case ('evernote'):
http="false";
break;
case ('picassa'):
http="https://picasaweb.google.com/data/feed/api/user/114704033710627861845/albumid/5807772905643040065?callback=?";
obj='data';
break;
case ('fandango'):
http="false";
break;
case ('gnerdl'):
http="false";
break;
case ('linkedin'):
http='';
//html += '<h3 class="to-center">Recommendations</h3>';
//tmp=recosHTML;
break;
}
//put html into content frame
function appendDOM(html) {
//console.log(html);
if(reset.hasClass("current")) {
reset.removeClass("current");
land_aside.fadeOut("slow");
legend.hide();
} else {
content_frame.hide();
}
content_frame.fadeIn("slow", function() {
eval(show);
}).html(html);
}
//make xhr request
function getFeed(http, obj, tmp, html, id)
|
{
//console.log(http);
//!!!cache? Would need to use local storage or DB or jquery-json.2.4.0
if(http !== '') {
$.ajax({
dataType: "jsonp",
jsonp: "callback",
url: http,
success: function(data) {
console.log("Data received via test: " + JSON.stringify(data));
if(id==="coderbits") {
var unique=0, total=0, content={"name":"", "amount":0, "img":""};
$.each(eval(obj), function(i,item) {
if(item.earned) {
content.name=item.name;
content.amount=item.level;
content.img=item.image_link;
total++;
if(item.level===1) {
unique++;
|
identifier_body
|
|
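The getFeed body above flips a success flag inside the JSONP success callback, while a two-second setTimeout outside it falls back to a timed-out message if the flag never flips. A minimal sketch of that pattern, assuming jQuery; the function and callback names are illustrative:
// Sketch of the success-flag timeout fallback used by getFeed above.
function fetchWithTimeout(url, renderResult) {
  var success = false;
  $.ajax({
    dataType: "jsonp",
    url: url,
    success: function (data) {
      success = true;
      renderResult(data);
    }
  });
  setTimeout(function () {
    if (!success) {
      renderResult(null); // the caller renders a "timed out" message when data is null
    }
  }, 2000);
}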
index.funcs.js
|
window\" target=\"_blank\">' +item.commit.message+ '</a></li>'";
limit = 5;
break;
case ('youtube'):
http="https://gdata.youtube.com/feeds/api/users/jahdakine/uploads?v=2&alt=json";
obj = "data.feed.entry";
tmp = "'<li><time datetime=\"' + item.updated.$t + '\">' +item.updated.$t.substr(0,10) + '</time>: <a href=\"' +item.link[0].href+ '\" title=\"Open' +item.title.$t+ 'in a new window\" target=\"_blank\">' +item.title.$t+ '</a></li>'";
break;
case ('coderbits'):
http="https://coderbits.com/jahdakine.json";
obj="data.badges";
tmp = "'<li>' + item + '</li>'";
break;
case ('google'):
http = 'https://www.googleapis.com/plus/v1/people/114704033710627861845/activities/public?key=AIzaSyC7qL3rj2BltH6GV6WOjovK3zuuS5sy024';
obj = 'data.items';
tmp = "'<li><img src=\"' +item.actor.image.url+ '\" alt=\"\" height=\"25\" width=\"25\"/> <time datetime=\"' + item.updated + '\">' + item.updated.substr(0,10) + '</time>: <a href=\"' + item.url + '\" target=\"_blank\">' + item.object.attachments[0].content.substr(0,50) + '...</a></li>'";
limit = 5;
break;
case ('zazzle'):
tmp = ["<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117573488824205121\" FlashVars=\"feedId=117573488824205121\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>",
"<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117453752667062082\" FlashVars=\"feedId=117453752667062082\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>",
"<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117631920418883930\" FlashVars=\"feedId=117631920418883930\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>"];
break;
case ('stackoverflow'):
http="http://api.stackoverflow.com/1.0/users/1997909/?jsonp=?";
obj = 'data.users';
tmp = "'<li><strong>Reputation</strong>:<br>' +item.reputation+ '<br><strong>Badges</strong>:<br> Gold-' +item.badge_counts.gold+ ' <br>Silver-' +item.badge_counts.silver+ ' <br>Bronze-' +item.badge_counts.bronze+ '</li>'";
break;
case ('grooveshark'):
http="false";
break;
case ('yelp'):
http="false";
break;
case ('ebay'):
http="false";
break;
case ('icloud'):
http="false";
break;
case ('vimeo'):
http="false";
break;
case ('netflix'):
http="false";
break;
case ('evernote'):
http="false";
break;
case ('picassa'):
http="https://picasaweb.google.com/data/feed/api/user/114704033710627861845/albumid/5807772905643040065?callback=?";
obj='data';
break;
case ('fandango'):
http="false";
break;
case ('gnerdl'):
http="false";
break;
case ('linkedin'):
http='';
//html += '<h3 class="to-center">Recommendations</h3>';
//tmp=recosHTML;
break;
}
//put html into content frame
function appendDOM(html) {
//console.log(html);
if(reset.hasClass("current")) {
reset.removeClass("current");
land_aside.fadeOut("slow");
legend.hide();
} else {
content_frame.hide();
}
content_frame.fadeIn("slow", function() {
eval(show);
}).html(html);
}
//make xhr request
function getFeed(http, obj, tmp, html, id) {
//console.log(http);
//!!!cache? Would need to use local storage or DB or jquery-json.2.4.0
if(http !== '') {
$.ajax({
dataType: "jsonp",
jsonp: "callback",
url: http,
success: function(data) {
console.log("Data received via test: " + JSON.stringify(data));
if(id==="coderbits") {
var unique=0, total=0, content={"name":"", "amount":0, "img":""};
$.each(eval(obj), function(i,item) {
if(item.earned) {
content.name=item.name;
content.amount=item.level;
content.img=item.image_link;
total++;
if(item.level===1) {
unique++;
}
}
if(item.level===64 && content.amount>0) {
tmp = '<p id="coderbits-badges"><img class="to-middle" src="' +content.img+ '" title="' +content.name+ ' badge" height="40" width="40" /> ' +content.amount+ ' bit ' +content.name+ '</p>';
html += tmp;
content={"name":"", "amount":0, "img":""};
}
});
var totals = '<p>' +total+ ' badges earned, ' +unique+ ' shown represent the highest achievement in category.</p>';
html += totals;
//main handler
} else {
$.each(eval(obj), function(i,item) {
console.log(item);
if(id === 'google' && item.object.attachments[0].content.substr(-4) === '.jpg') {
tmp = "'<li><img src=\"' +item.actor.image.url+ '\" alt=\"\" height=\"25\" width=\"25\"/> <time datetime=\"' + item.updated + '\">' + item.updated.substr(0,10) + '</time>: <a href=\"' + item.url + '\" target=\"_blank\"><img src=\"' + item.object.attachments[0].fullImage.url + '\" height=\"150\" width=\"150\" alt=\"\" class=\"feedStyle\"/></a></li>'";
}
html += eval(tmp);
if(i > limit) { return false; }
});
if(id !== 'flickr' && html.search("<li>") === -1) {
html+='<li><img src="/img/warning-icon.png" height="16" width="16" alt=""/> Sorry, nothing today!</li>';
}
}
html += closer;
success = true;
appendDOM(html);
}
});
//non-standard feed
} else {
success = true;
if(id==='zazzle') {
var rand = Math.floor((Math.random()*3));
html = '<h2 class="to-center">Latest Zazzle Products</h2><div class="to-center">' +tmp[rand]+ "</div>";
} else {
html += tmp;
}
appendDOM(html);
}
}
getFeed(http, obj, tmp, html, id);
//ERROR: Can be tested by commenting appendDOM(html) line in getFeed
setTimeout(function() {
if (!success) {
html = '<h2 class="to-center">Timed out!</h2><blockquote>The request for ' +id.substr(0,1).toUpperCase()+id.substr(1)+ ' data has timed out. Please try again later.</blockquote>';
appendDOM(html);
}
}, 2000);
});
// list.on('click', function(e) {
// if(list_img.css("display") === "inline-table") {
// e.preventDefault();
// var quit = confirm("Jane, stop this crazy thing!");
// if(quit) { menu_text.trigger('click'); }
// }
// });
/* Check for state */
if(url === '?state=img-list')
|
{
menu_graphics.trigger('click');
}
|
conditional_block
|
|
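The menu handlers above push ?state=text-list or ?state=img-list into the history on every switch, and the conditional block at the end replays the matching click when the page loads with that query string. A small sketch of the round trip, assuming jQuery and a hypothetical #menu-graphics element:
// Sketch: persist the active view in the query string and restore it on page load.
var urlNoParams = location.protocol + "//" + location.host + location.pathname;
$("#menu-graphics").on("click", function (e) { // hypothetical id
  e.preventDefault();
  // ...switch the UI into carousel mode here...
  history.pushState("graphics", "Home", urlNoParams + "?state=img-list");
});
// On load, replay the stored state (the file above compares the query string directly).
if (location.search === "?state=img-list") {
  $("#menu-graphics").trigger("click");
}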
index.funcs.js
|
= false;
show = "content_frame.css('display','inline').removeClass('image-matrix')";
switch (id) {
case ('blogger'):
http = 'https://www.googleapis.com/blogger/v3/blogs/2575251403540723939/posts?key=AIzaSyC4Zhv-nd_98_9Vn8Ad3U6TjY99Pd2YzOQ';
obj = 'data.items';
tmp = "'<li><time datetime=\"' + item.updated + '\">' + item.updated.substr(0,10) + '</time>: <a href=\"' + item.url + '\" target=\"_blank\">' + item.title + '</a></li>'";
limit = 5;
break;
case ('twitter'):
http = 'http://search.twitter.com/search.json?q=jahdakine&callback=?';
obj = 'data.results';
tmp = "'<li><img src=\"' +item.profile_image_url+ '\" height=\"25\" width=\"25\" alt=\"profile icon\"/> <time datetime=\"' +item.created_at.split(' ').slice(0, 4).join(' ')+ '\">' +item.created_at.split(' ').slice(0, 4).join(' ')+ '</time>: <a href=\"http://twitter.com/jahdakine/status/' +item.id_str+ '\" target=\"_blank\">' +item.text+ '</a></li>'";
limit = 10;
break;
case ('flickr'):
html = "<div id='flickr-container' class='boxWrapper'>";
closer = "</div><div class=\"clear-fix\">";
http = 'http://api.flickr.com/services/feeds/photos_public.gne?id=23019891@N00&lang=en-us&format=json&jsoncallback=?';
obj = 'data.items';
tmp = "'<div class=\"boxOuter\"><a href=\"' + item.link + '\" target=\"_blank\" class=\"flickr-img boxInner\" title=\"Open Flickr page titled "' + item.title + '" in a new window/tab\"><img src=\"' + item.media.m + '\" /></a></div>'";
show = "content_frame.css('display','block').addClass('image-matrix')";
limit = 20;
break;
case ('meetup'): //venue=1139097 member=65732862 group=1769691 group_urlname=HTML5-Denver-Users-Group
http = 'http://api.meetup.com/activity?_=1361290215235&member_id=65732862&format=json&sig_id=65732862&sig=7be5cdcf1093d70515959c1b785e75c67f9c642f';
obj = 'data.results';
tmp = "'<li>' +item.updated+ ': <a href=\"' +item.link+ '\" title=\"Open' +item.title+ ' in a new window\" target=\"_blank\">' +item.title+ '</a><li>'";
limit = 3;
break;
case ('github'):
http = "https://api.github.com/repos/jahdakine/jahdakine/commits?callback=?";
obj = 'data.data';
tmp = "'<li>' +item.commit.author.date.substr(0,10)+ ' ' +item.commit.author.date.substr(11,8)+ ': <a href=\"' +item.html_url+ '\" title=\"Open Github commit log in a new window\" target=\"_blank\">' +item.commit.message+ '</a></li>'";
limit = 5;
break;
case ('youtube'):
http="https://gdata.youtube.com/feeds/api/users/jahdakine/uploads?v=2&alt=json";
obj = "data.feed.entry";
tmp = "'<li><time datetime=\"' + item.updated.$t + '\">' +item.updated.$t.substr(0,10) + '</time>: <a href=\"' +item.link[0].href+ '\" title=\"Open' +item.title.$t+ 'in a new window\" target=\"_blank\">' +item.title.$t+ '</a></li>'";
break;
case ('coderbits'):
http="https://coderbits.com/jahdakine.json";
obj="data.badges";
tmp = "'<li>' + item + '</li>'";
break;
case ('google'):
http = 'https://www.googleapis.com/plus/v1/people/114704033710627861845/activities/public?key=AIzaSyC7qL3rj2BltH6GV6WOjovK3zuuS5sy024';
obj = 'data.items';
tmp = "'<li><img src=\"' +item.actor.image.url+ '\" alt=\"\" height=\"25\" width=\"25\"/> <time datetime=\"' + item.updated + '\">' + item.updated.substr(0,10) + '</time>: <a href=\"' + item.url + '\" target=\"_blank\">' + item.object.attachments[0].content.substr(0,50) + '...</a></li>'";
limit = 5;
break;
case ('zazzle'):
tmp = ["<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117573488824205121\" FlashVars=\"feedId=117573488824205121\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>",
"<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117453752667062082\" FlashVars=\"feedId=117453752667062082\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>",
"<embed wmode=\"transparent\" src=\"http://www.zazzle.com/utl/getpanel?zp=117631920418883930\" FlashVars=\"feedId=117631920418883930\" width=\"450\" height=\"300\" type=\"application/x-shockwave-flash\"></embed>"];
break;
case ('stackoverflow'):
http="http://api.stackoverflow.com/1.0/users/1997909/?jsonp=?";
obj = 'data.users';
tmp = "'<li><strong>Reputation</strong>:<br>' +item.reputation+ '<br><strong>Badges</strong>:<br> Gold-' +item.badge_counts.gold+ ' <br>Silver-' +item.badge_counts.silver+ ' <br>Bronze-' +item.badge_counts.bronze+ '</li>'";
break;
case ('grooveshark'):
http="false";
break;
case ('yelp'):
http="false";
break;
case ('ebay'):
http="false";
break;
case ('icloud'):
http="false";
break;
case ('vimeo'):
http="false";
break;
case ('netflix'):
http="false";
break;
case ('evernote'):
http="false";
break;
case ('picassa'):
http="https://picasaweb.google.com/data/feed/api/user/114704033710627861845/albumid/5807772905643040065?callback=?";
obj='data';
break;
case ('fandango'):
http="false";
break;
case ('gnerdl'):
http="false";
break;
case ('linkedin'):
http='';
//html += '<h3 class="to-center">Recommendations</h3>';
//tmp=recosHTML;
break;
}
//put html into content frame
function appendDOM(html) {
//console.log(html);
if(reset.hasClass("current")) {
reset.removeClass("current");
land_aside.fadeOut("slow");
legend.hide();
} else {
content_frame.hide();
}
content_frame.fadeIn("slow", function() {
eval(show);
}).html(html);
}
//make xhr request
function getFeed(http, obj, tmp, html, id) {
//console.log(http);
//!!!cache? Would need to use local storage or DB or jquery-json.2.4.0
if(http !== '') {
$.ajax({
|
dataType: "jsonp",
jsonp: "callback",
|
random_line_split
|
|
backend.rs
|
the License.
//! Substrate blockchain trait
use log::warn;
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating},
Justifications,
};
use std::collections::btree_set::BTreeSet;
use crate::header_metadata::HeaderMetadata;
use crate::error::{Error, Result};
/// Blockchain database header backend. Does not perform any validation.
pub trait HeaderBackend<Block: BlockT>: Send + Sync {
/// Get block header. Returns `None` if block is not found.
fn header(&self, hash: Block::Hash) -> Result<Option<Block::Header>>;
/// Get blockchain info.
fn info(&self) -> Info<Block>;
/// Get block status.
fn status(&self, hash: Block::Hash) -> Result<BlockStatus>;
/// Get block number by hash. Returns `None` if the header is not in the chain.
fn number(
&self,
hash: Block::Hash,
) -> Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>>;
/// Get block hash by number. Returns `None` if the header is not in the chain.
fn hash(&self, number: NumberFor<Block>) -> Result<Option<Block::Hash>>;
/// Convert an arbitrary block ID into a block hash.
fn block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Option<Block::Hash>> {
match *id {
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => self.hash(n),
}
}
/// Convert an arbitrary block ID into a block hash.
fn block_number_from_id(&self, id: &BlockId<Block>) -> Result<Option<NumberFor<Block>>> {
match *id {
BlockId::Hash(h) => self.number(h),
BlockId::Number(n) => Ok(Some(n)),
}
}
/// Get block header. Returns `UnknownBlock` error if block is not found.
fn
|
(&self, hash: Block::Hash) -> Result<Block::Header> {
self.header(hash)?
.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", hash)))
}
/// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_number_from_id(&self, id: &BlockId<Block>) -> Result<NumberFor<Block>> {
self.block_number_from_id(id).and_then(|n| {
n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id)))
})
}
/// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Block::Hash> {
self.block_hash_from_id(id).and_then(|h| {
h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
})
}
}
/// Handles stale forks.
pub trait ForkBackend<Block: BlockT>:
HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync
{
/// Best effort to get all the header hashes that are part of the provided forks
/// starting only from the fork heads.
///
/// The function tries to reconstruct the route from the fork head to the canonical chain.
/// If any of the hashes on the route can't be found in the db, the function won't be able
/// to reconstruct the route anymore. In this case it will give up expanding the current fork,
/// move on to the next ones and at the end it will return an error that also contains
/// the partially expanded forks.
fn expand_forks(
&self,
fork_heads: &[Block::Hash],
) -> std::result::Result<BTreeSet<Block::Hash>, (BTreeSet<Block::Hash>, Error)> {
let mut missing_blocks = vec![];
let mut expanded_forks = BTreeSet::new();
for fork_head in fork_heads {
let mut route_head = *fork_head;
// Insert stale blocks hashes until canonical chain is reached.
// If we reach a block that is already part of the `expanded_forks` we can stop
// processing the fork.
while expanded_forks.insert(route_head) {
match self.header_metadata(route_head) {
Ok(meta) => {
// If the parent is part of the canonical chain or there doesn't exist a
// block hash for the parent number (bug?!), we can abort adding blocks.
let parent_number = meta.number.saturating_sub(1u32.into());
match self.hash(parent_number) {
Ok(Some(parent_hash)) =>
if parent_hash == meta.parent {
break
},
Ok(None) | Err(_) => {
missing_blocks.push(BlockId::<Block>::Number(parent_number));
break
},
}
route_head = meta.parent;
},
Err(_e) => {
missing_blocks.push(BlockId::<Block>::Hash(route_head));
break
},
}
}
}
if !missing_blocks.is_empty() {
return Err((
expanded_forks,
Error::UnknownBlocks(format!(
"Missing stale headers {:?} while expanding forks {:?}.",
fork_heads, missing_blocks
)),
))
}
Ok(expanded_forks)
}
}
impl<Block, T> ForkBackend<Block> for T
where
Block: BlockT,
T: HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync,
{
}
/// Blockchain database backend. Does not perform any validation.
pub trait Backend<Block: BlockT>:
HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>
{
/// Get block body. Returns `None` if block is not found.
fn body(&self, hash: Block::Hash) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justifications. Returns `None` if no justification exists.
fn justifications(&self, hash: Block::Hash) -> Result<Option<Justifications>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
/// Returns hashes of all blocks that are leaves of the block tree.
/// In other words, blocks that have no children and are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
/// Returns displaced leaves after the given block would be finalized.
///
/// The returned leaves do not contain the leaves from the same height as `block_number`.
fn displaced_leaves_after_finalizing(
&self,
block_number: NumberFor<Block>,
) -> Result<Vec<Block::Hash>>;
/// Return hashes of all blocks that are children of the block with `parent_hash`.
fn children(&self, parent_hash: Block::Hash) -> Result<Vec<Block::Hash>>;
/// Get the most recent block hash of the longest chain that contains
/// a block with the given `base_hash`.
///
/// The search space is always limited to blocks which are in the finalized
/// chain or descendants of it.
///
/// Returns `Ok(None)` if `base_hash` is not found in search space.
// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
fn longest_containing(
&self,
base_hash: Block::Hash,
import_lock: &RwLock<()>,
) -> Result<Option<Block::Hash>> {
let Some(base_header) = self.header(base_hash)? else { return Ok(None) };
let leaves = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_guard = import_lock.read();
let info = self.info();
if info.finalized_number > *base_header.number() {
// `base_header` is on a dead fork.
return Ok(None)
}
self.leaves()?
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
let mut current_hash = leaf_hash;
// go backwards through the chain (via parent links)
loop {
if current_hash == base_hash {
return Ok(Some(leaf_hash))
}
let current_header = self
.header(current_hash)?
.ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < base_header.number() {
break
}
current_hash = *current_header.parent_hash();
}
}
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all leaves backwards",
base_hash,
);
Ok(None)
|
expect_header
|
identifier_name
|
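backend.rs above documents longest_containing as: take the leaves ordered best chain first, and for each leaf walk parent links backwards until either the base hash is reached or the walk drops below the base block's number. The trait is Rust, but the walk itself is language independent; a JavaScript illustration over a hypothetical in-memory header map (the finalized-number pre-check and the import lock are omitted here):
// Illustration only: headers is a Map of hash -> { number, parentHash };
// leaves are ordered best (longest, highest) chain first, as the trait requires.
function longestContaining(headers, leaves, baseHash) {
  var base = headers.get(baseHash);
  if (!base) return null;
  for (var i = 0; i < leaves.length; i++) {
    var current = leaves[i];
    while (true) {
      if (current === baseHash) return leaves[i];        // base is an ancestor of this leaf
      var header = headers.get(current);
      if (!header || header.number < base.number) break; // missing header or below the base height
      current = header.parentHash;
    }
  }
  return null; // base exists only on a dead fork, or not at all
}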
backend.rs
|
License.
//! Substrate blockchain trait
use log::warn;
use parking_lot::RwLock;
use sp_runtime::{
generic::BlockId,
traits::{Block as BlockT, Header as HeaderT, NumberFor, Saturating},
Justifications,
};
use std::collections::btree_set::BTreeSet;
use crate::header_metadata::HeaderMetadata;
use crate::error::{Error, Result};
/// Blockchain database header backend. Does not perform any validation.
pub trait HeaderBackend<Block: BlockT>: Send + Sync {
/// Get block header. Returns `None` if block is not found.
fn header(&self, hash: Block::Hash) -> Result<Option<Block::Header>>;
/// Get blockchain info.
fn info(&self) -> Info<Block>;
/// Get block status.
fn status(&self, hash: Block::Hash) -> Result<BlockStatus>;
/// Get block number by hash. Returns `None` if the header is not in the chain.
fn number(
&self,
hash: Block::Hash,
) -> Result<Option<<<Block as BlockT>::Header as HeaderT>::Number>>;
/// Get block hash by number. Returns `None` if the header is not in the chain.
fn hash(&self, number: NumberFor<Block>) -> Result<Option<Block::Hash>>;
/// Convert an arbitrary block ID into a block hash.
fn block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Option<Block::Hash>> {
match *id {
BlockId::Hash(h) => Ok(Some(h)),
BlockId::Number(n) => self.hash(n),
}
}
/// Convert an arbitrary block ID into a block hash.
fn block_number_from_id(&self, id: &BlockId<Block>) -> Result<Option<NumberFor<Block>>> {
match *id {
BlockId::Hash(h) => self.number(h),
BlockId::Number(n) => Ok(Some(n)),
}
}
/// Get block header. Returns `UnknownBlock` error if block is not found.
fn expect_header(&self, hash: Block::Hash) -> Result<Block::Header> {
self.header(hash)?
.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", hash)))
}
/// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_number_from_id(&self, id: &BlockId<Block>) -> Result<NumberFor<Block>> {
self.block_number_from_id(id).and_then(|n| {
n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id)))
})
}
/// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Block::Hash> {
self.block_hash_from_id(id).and_then(|h| {
h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
})
}
}
/// Handles stale forks.
pub trait ForkBackend<Block: BlockT>:
HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync
{
/// Best effort to get all the header hashes that are part of the provided forks
/// starting only from the fork heads.
///
/// The function tries to reconstruct the route from the fork head to the canonical chain.
/// If any of the hashes on the route can't be found in the db, the function won't be able
/// to reconstruct the route anymore. In this case it will give up expanding the current fork,
/// move on to the next ones and at the end it will return an error that also contains
/// the partially expanded forks.
fn expand_forks(
&self,
fork_heads: &[Block::Hash],
) -> std::result::Result<BTreeSet<Block::Hash>, (BTreeSet<Block::Hash>, Error)> {
let mut missing_blocks = vec![];
let mut expanded_forks = BTreeSet::new();
for fork_head in fork_heads {
let mut route_head = *fork_head;
// Insert stale blocks hashes until canonical chain is reached.
// If we reach a block that is already part of the `expanded_forks` we can stop
// processing the fork.
while expanded_forks.insert(route_head) {
match self.header_metadata(route_head) {
Ok(meta) => {
// If the parent is part of the canonical chain or there doesn't exist a
// block hash for the parent number (bug?!), we can abort adding blocks.
let parent_number = meta.number.saturating_sub(1u32.into());
match self.hash(parent_number) {
Ok(Some(parent_hash)) =>
if parent_hash == meta.parent
|
,
Ok(None) | Err(_) => {
missing_blocks.push(BlockId::<Block>::Number(parent_number));
break
},
}
route_head = meta.parent;
},
Err(_e) => {
missing_blocks.push(BlockId::<Block>::Hash(route_head));
break
},
}
}
}
if !missing_blocks.is_empty() {
return Err((
expanded_forks,
Error::UnknownBlocks(format!(
"Missing stale headers {:?} while expanding forks {:?}.",
fork_heads, missing_blocks
)),
))
}
Ok(expanded_forks)
}
}
impl<Block, T> ForkBackend<Block> for T
where
Block: BlockT,
T: HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync,
{
}
/// Blockchain database backend. Does not perform any validation.
pub trait Backend<Block: BlockT>:
HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>
{
/// Get block body. Returns `None` if block is not found.
fn body(&self, hash: Block::Hash) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justifications. Returns `None` if no justification exists.
fn justifications(&self, hash: Block::Hash) -> Result<Option<Justifications>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
/// Returns hashes of all blocks that are leaves of the block tree.
/// In other words, blocks that have no children and are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
/// Returns displaced leaves after the given block would be finalized.
///
/// The returned leaves do not contain the leaves from the same height as `block_number`.
fn displaced_leaves_after_finalizing(
&self,
block_number: NumberFor<Block>,
) -> Result<Vec<Block::Hash>>;
/// Return hashes of all blocks that are children of the block with `parent_hash`.
fn children(&self, parent_hash: Block::Hash) -> Result<Vec<Block::Hash>>;
/// Get the most recent block hash of the longest chain that contains
/// a block with the given `base_hash`.
///
/// The search space is always limited to blocks which are in the finalized
/// chain or descendants of it.
///
/// Returns `Ok(None)` if `base_hash` is not found in search space.
// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
fn longest_containing(
&self,
base_hash: Block::Hash,
import_lock: &RwLock<()>,
) -> Result<Option<Block::Hash>> {
let Some(base_header) = self.header(base_hash)? else { return Ok(None) };
let leaves = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_guard = import_lock.read();
let info = self.info();
if info.finalized_number > *base_header.number() {
// `base_header` is on a dead fork.
return Ok(None)
}
self.leaves()?
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
let mut current_hash = leaf_hash;
// go backwards through the chain (via parent links)
loop {
if current_hash == base_hash {
return Ok(Some(leaf_hash))
}
let current_header = self
.header(current_hash)?
.ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < base_header.number() {
break
}
current_hash = *current_header.parent_hash();
}
}
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all leaves backwards",
base_hash,
);
Ok(None)
|
{
break
}
|
conditional_block
|
backend.rs
|
}
/// Convert an arbitrary block ID into a block hash.
fn block_number_from_id(&self, id: &BlockId<Block>) -> Result<Option<NumberFor<Block>>> {
match *id {
BlockId::Hash(h) => self.number(h),
BlockId::Number(n) => Ok(Some(n)),
}
}
/// Get block header. Returns `UnknownBlock` error if block is not found.
fn expect_header(&self, hash: Block::Hash) -> Result<Block::Header> {
self.header(hash)?
.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", hash)))
}
/// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_number_from_id(&self, id: &BlockId<Block>) -> Result<NumberFor<Block>> {
self.block_number_from_id(id).and_then(|n| {
n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id)))
})
}
/// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is
/// not found.
fn expect_block_hash_from_id(&self, id: &BlockId<Block>) -> Result<Block::Hash> {
self.block_hash_from_id(id).and_then(|h| {
h.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id)))
})
}
}
/// Handles stale forks.
pub trait ForkBackend<Block: BlockT>:
HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync
{
/// Best effort to get all the header hashes that are part of the provided forks
/// starting only from the fork heads.
///
/// The function tries to reconstruct the route from the fork head to the canonical chain.
/// If any of the hashes on the route can't be found in the db, the function won't be able
/// to reconstruct the route anymore. In this case it will give up expanding the current fork,
/// move on to the next ones and at the end it will return an error that also contains
/// the partially expanded forks.
fn expand_forks(
&self,
fork_heads: &[Block::Hash],
) -> std::result::Result<BTreeSet<Block::Hash>, (BTreeSet<Block::Hash>, Error)> {
let mut missing_blocks = vec![];
let mut expanded_forks = BTreeSet::new();
for fork_head in fork_heads {
let mut route_head = *fork_head;
// Insert stale blocks hashes until canonical chain is reached.
// If we reach a block that is already part of the `expanded_forks` we can stop
// processing the fork.
while expanded_forks.insert(route_head) {
match self.header_metadata(route_head) {
Ok(meta) => {
// If the parent is part of the canonical chain or there doesn't exist a
// block hash for the parent number (bug?!), we can abort adding blocks.
let parent_number = meta.number.saturating_sub(1u32.into());
match self.hash(parent_number) {
Ok(Some(parent_hash)) =>
if parent_hash == meta.parent {
break
},
Ok(None) | Err(_) => {
missing_blocks.push(BlockId::<Block>::Number(parent_number));
break
},
}
route_head = meta.parent;
},
Err(_e) => {
missing_blocks.push(BlockId::<Block>::Hash(route_head));
break
},
}
}
}
if !missing_blocks.is_empty() {
return Err((
expanded_forks,
Error::UnknownBlocks(format!(
"Missing stale headers {:?} while expanding forks {:?}.",
fork_heads, missing_blocks
)),
))
}
Ok(expanded_forks)
}
}
impl<Block, T> ForkBackend<Block> for T
where
Block: BlockT,
T: HeaderMetadata<Block> + HeaderBackend<Block> + Send + Sync,
{
}
/// Blockchain database backend. Does not perform any validation.
pub trait Backend<Block: BlockT>:
HeaderBackend<Block> + HeaderMetadata<Block, Error = Error>
{
/// Get block body. Returns `None` if block is not found.
fn body(&self, hash: Block::Hash) -> Result<Option<Vec<<Block as BlockT>::Extrinsic>>>;
/// Get block justifications. Returns `None` if no justification exists.
fn justifications(&self, hash: Block::Hash) -> Result<Option<Justifications>>;
/// Get last finalized block hash.
fn last_finalized(&self) -> Result<Block::Hash>;
/// Returns hashes of all blocks that are leaves of the block tree.
/// In other words, blocks that have no children and are chain heads.
/// Results must be ordered best (longest, highest) chain first.
fn leaves(&self) -> Result<Vec<Block::Hash>>;
/// Returns displaced leaves after the given block would be finalized.
///
/// The returned leaves do not contain the leaves from the same height as `block_number`.
fn displaced_leaves_after_finalizing(
&self,
block_number: NumberFor<Block>,
) -> Result<Vec<Block::Hash>>;
/// Return hashes of all blocks that are children of the block with `parent_hash`.
fn children(&self, parent_hash: Block::Hash) -> Result<Vec<Block::Hash>>;
/// Get the most recent block hash of the longest chain that contains
/// a block with the given `base_hash`.
///
/// The search space is always limited to blocks which are in the finalized
/// chain or descendants of it.
///
/// Returns `Ok(None)` if `base_hash` is not found in search space.
// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444)
fn longest_containing(
&self,
base_hash: Block::Hash,
import_lock: &RwLock<()>,
) -> Result<Option<Block::Hash>> {
let Some(base_header) = self.header(base_hash)? else { return Ok(None) };
let leaves = {
// ensure no blocks are imported during this code block.
// an import could trigger a reorg which could change the canonical chain.
// we depend on the canonical chain staying the same during this code block.
let _import_guard = import_lock.read();
let info = self.info();
if info.finalized_number > *base_header.number() {
// `base_header` is on a dead fork.
return Ok(None)
}
self.leaves()?
};
// for each chain. longest chain first. shortest last
for leaf_hash in leaves {
let mut current_hash = leaf_hash;
// go backwards through the chain (via parent links)
loop {
if current_hash == base_hash {
return Ok(Some(leaf_hash))
}
let current_header = self
.header(current_hash)?
.ok_or_else(|| Error::MissingHeader(current_hash.to_string()))?;
// stop search in this chain once we go below the target's block number
if current_header.number() < base_header.number() {
break
}
current_hash = *current_header.parent_hash();
}
}
// header may be on a dead fork -- the only leaves that are considered are
// those which can still be finalized.
//
// FIXME #1558 only issue this warning when not on a dead fork
warn!(
"Block {:?} exists in chain but not found when following all leaves backwards",
base_hash,
);
Ok(None)
}
/// Get single indexed transaction by content hash. Note that this will only fetch transactions
/// that are indexed by the runtime with `storage_index_transaction`.
fn indexed_transaction(&self, hash: Block::Hash) -> Result<Option<Vec<u8>>>;
/// Check if indexed transaction exists.
fn has_indexed_transaction(&self, hash: Block::Hash) -> Result<bool> {
Ok(self.indexed_transaction(hash)?.is_some())
}
fn block_indexed_body(&self, hash: Block::Hash) -> Result<Option<Vec<Vec<u8>>>>;
}
/// Blockchain info
#[derive(Debug, Eq, PartialEq)]
pub struct Info<Block: BlockT> {
/// Best block hash.
pub best_hash: Block::Hash,
/// Best block number.
pub best_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Genesis block hash.
pub genesis_hash: Block::Hash,
/// The head of the finalized chain.
pub finalized_hash: Block::Hash,
/// Last finalized block number.
pub finalized_number: <<Block as BlockT>::Header as HeaderT>::Number,
/// Last finalized state.
pub finalized_state: Option<(Block::Hash, <<Block as BlockT>::Header as HeaderT>::Number)>,
/// Number of concurrent leave forks.
pub number_leaves: usize,
/// Missing blocks after warp sync. (start, end).
pub block_gap: Option<(NumberFor<Block>, NumberFor<Block>)>,
}
/// Block status.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
pub enum BlockStatus {
/// Already in the blockchain.
InChain,
/// Not in the queue or the blockchain.
|
random_line_split
|
|
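The ForkBackend rows above expand stale forks by walking each fork head back through parent links until the canonical chain is reached, detected when the hash stored at the parent's height equals the parent hash. A JavaScript illustration of that walk over hypothetical in-memory maps; the missing-block error reporting from the trait is dropped for brevity:
// Illustration only: headers maps hash -> { number, parentHash },
// canonicalByNumber maps a block number to the canonical hash at that height.
function expandForks(headers, canonicalByNumber, forkHeads) {
  var expanded = new Set();
  forkHeads.forEach(function (forkHead) {
    var routeHead = forkHead;
    while (!expanded.has(routeHead)) {       // stop once another fork already covered this hash
      expanded.add(routeHead);
      var meta = headers.get(routeHead);
      if (!meta) break;                      // unknown header: give up on this fork
      var parentNumber = Math.max(meta.number - 1, 0);
      if (canonicalByNumber.get(parentNumber) === meta.parentHash) break; // reached the canonical chain
      routeHead = meta.parentHash;
    }
  });
  return expanded;
}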
webpack.config.js
|
WebpackManifestPlugin: ManifestPlugin,
} = require("webpack-manifest-plugin");
const postcssNormalize = require("postcss-normalize");
const paths = require("./paths");
const { StonksWatcherWidget } = require("../widget.config");
const isDevelopment = process.env.NODE_ENV === "development";
const appPackageJson = require(paths.appPackageJson);
const imageInlineSizeLimit = parseInt(
process.env.IMAGE_INLINE_SIZE_LIMIT || "10000"
);
const getStyleLoaders = (cssOptions, preProcessor) => {
const loaders = [
isDevelopment && require.resolve("style-loader"),
!isDevelopment && {
loader: MiniCssExtractPlugin.loader,
// css is located in `static/css`, use '../../' to locate index.html folder
// in production `paths.publicUrlOrPath` can be a relative path
options: paths.publicUrlOrPath.startsWith(".")
? { publicPath: "../../" }
: {},
},
{
loader: require.resolve("css-loader"),
options: cssOptions,
},
{
loader: require.resolve("postcss-loader"),
options: {
postcssOptions: {
ident: "postcss",
plugins: [
require("postcss-flexbugs-fixes"),
require("postcss-preset-env")({
autoprefixer: {
flexbox: "no-2009",
},
stage: 3,
|
],
},
sourceMap: true,
},
},
].filter(Boolean);
if (preProcessor) {
loaders.push(
{
loader: require.resolve("resolve-url-loader"),
options: {
sourceMap: true,
root: paths.appSrc,
},
},
{
loader: require.resolve(preProcessor),
options: {
sourceMap: true,
},
}
);
}
return loaders;
};
module.exports = {
mode: isDevelopment ? "development" : "production",
bail: !isDevelopment,
devtool: isDevelopment ? "cheap-module-source-map" : "source-map",
devServer: {
contentBase: paths.appBuild,
port: 3002,
},
entry: paths.appIndexJs,
output: {
path: paths.appBuild,
pathinfo: isDevelopment,
filename: isDevelopment
? "static/js/[name].[contenthash:8].js"
: "static/js/[name].js",
chunkFilename: isDevelopment
? "static/js/[name].[contenthash:8].chunk.js"
: "static/js/[name].chunk.js",
publicPath: paths.publicUrlOrPath,
chunkLoadingGlobal: `webpackJsonp__${appPackageJson.name
.replace("@", "")
.replace("/", "_")}`,
globalObject: "this",
devtoolModuleFilenameTemplate: isDevelopment
? (info) => path.resolve(info.absoluteResourcePath).replace(/\\/g, "/")
: (info) =>
path
.relative(paths.appSrc, info.absoluteResourcePath)
.replace(/\\/g, "/"),
},
optimization: {
minimize: !isDevelopment,
minimizer: [
new TerserPlugin({
terserOptions: {
parse: {
ecma: 8,
},
compress: {
ecma: 5,
warnings: false,
comparisons: false,
inline: 2,
},
mangle: {
safari10: true,
},
keep_classnames: !isDevelopment,
keep_fnames: !isDevelopment,
output: {
ecma: 5,
comments: false,
ascii_only: true,
},
},
}),
],
splitChunks: {
chunks: "all",
name: false,
},
runtimeChunk: false,
},
resolve: {
extensions: paths.moduleFileExtensions
.map((ext) => `.${ext}`)
.filter((ext) => true || !ext.includes("ts")),
alias: {},
fallback: {
module: false,
dgram: false,
dns: false,
fs: false,
http2: false,
net: false,
tls: false,
child_process: false,
},
},
module: {
strictExportPresence: true,
rules: [
// Disable require.ensure as it's not a standard language feature.
{ parser: { requireEnsure: false } },
{
// "oneOf" will traverse all following loaders until one will
// match the requirements. When no loader matches it will fall
// back to the "file" loader at the end of the loader list.
oneOf: [
{
test: [/\.avif$/],
loader: require.resolve("url-loader"),
options: {
limit: imageInlineSizeLimit,
mimetype: "image/avif",
name: "static/media/[name].[hash:8].[ext]",
},
},
{
test: [/\.bmp$/, /\.gif$/, /\.jpe?g$/, /\.png$/],
loader: require.resolve("url-loader"),
options: {
limit: imageInlineSizeLimit,
name: "static/media/[name].[hash:8].[ext]",
},
},
// Process application JS with Babel.
// The preset includes JSX, Flow, TypeScript, and some ESnext features.
{
test: /\.(js|mjs|jsx|ts|tsx)$/,
include: paths.appSrc,
loader: require.resolve("babel-loader"),
options: {
// This is a feature of `babel-loader` for webpack (not Babel itself).
// It enables caching results in ./node_modules/.cache/babel-loader/
// directory for faster rebuilds.
cacheDirectory: true,
// See #6846 for context on why cacheCompression is disabled
cacheCompression: false,
compact: !isDevelopment,
},
},
// Process any JS outside of the app with Babel.
// Unlike the application JS, we only compile the standard ES features.
{
test: /\.(js|mjs)$/,
exclude: /@babel(?:\/|\\{1,2})runtime/,
loader: require.resolve("babel-loader"),
options: {
babelrc: false,
configFile: false,
compact: false,
presets: [],
cacheDirectory: true,
cacheCompression: false,
// Babel sourcemaps are needed for debugging into node_modules
// code. Without the options below, debuggers like VSCode
// show incorrect code and set breakpoints on the wrong lines.
sourceMaps: !isDevelopment,
inputSourceMap: !isDevelopment,
},
},
{
test: /\.css$/,
use: getStyleLoaders({
importLoaders: 1,
sourceMap: true,
}),
sideEffects: true,
},
{
test: /\.(scss|sass)$/,
exclude: /\.module\.(scss|sass)$/,
use: getStyleLoaders(
{
importLoaders: 3,
sourceMap: true,
},
"sass-loader"
),
sideEffects: true,
},
{
test: /\.module\.(scss|sass)$/,
use: getStyleLoaders(
{
importLoaders: 3,
sourceMap: true,
modules: {
localIdentName: `${StonksWatcherWidget.id}--[local]--[hash:base64:5]`,
},
},
"sass-loader"
),
sideEffects: true,
},
// The "file" loader below catches any assets that fall through the other loaders.
{
loader: require.resolve("file-loader"),
// Exclude `js` files to keep "css" loader working as it injects
// its runtime that would otherwise be processed through "file" loader.
// Also exclude `html` and `json` extensions so they get processed
// by webpacks internal loaders.
exclude: [/\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
options: {
name: "static/media/[name].[hash:8].[ext]",
},
},
// ** STOP ** Are you adding a new loader?
// Make sure to add the new loader(s) before the "file" loader.
],
},
],
},
plugins: [
new ModuleFederationPlugin({
name: StonksWatcherWidget.id,
library: {
type: "var",
name: StonksWatcherWidget.id,
},
filename: "remoteEntry.js",
remotes: {},
exposes: {
"./": "./src/index",
},
shared: {},
}),
// new HtmlWebpackPlugin(
// Object.assign(
// {},
// {
// inject: true,
// template: paths.appHtml,
// },
// !isDevelopment
// ? {
// minify: {
// removeComments: true,
// collapseWhitespace: true,
// removeRedundantAttributes: true,
// useShortDoctype: true,
// removeEmptyAttributes: true,
// removeStyleLinkTypeAttributes: true,
// keepClosingSlash: true,
// minifyJS: true,
// minifyCSS: true,
// minifyURLs: true,
// },
// }
// : undefined
// )
// ),
|
}),
postcssNormalize(),
|
random_line_split
|
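The webpack.config.js rows above expose the widget through ModuleFederationPlugin (library type var, filename remoteEntry.js, dev server on port 3002). A hedged sketch of how a host build might consume that remote; the host name, the stonksWatcher alias, and the final import specifier are assumptions, and only the widget id, port, and remoteEntry.js filename come from the config shown:
// Hypothetical host-side webpack config consuming the remoteEntry.js built above.
const { ModuleFederationPlugin } = require("webpack").container;
module.exports = {
  // ...the host's own entry/output/loaders...
  plugins: [
    new ModuleFederationPlugin({
      name: "host",
      remotes: {
        // <WIDGET_ID> is whatever ../widget.config exports as StonksWatcherWidget.id.
        stonksWatcher: "<WIDGET_ID>@http://localhost:3002/remoteEntry.js",
      },
      shared: {},
    }),
  ],
};
// The host then loads the exposed module (the "./" -> "./src/index" mapping above)
// lazily with a dynamic import against the stonksWatcher remote scope.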
webpack.config.js
|
WebpackManifestPlugin: ManifestPlugin,
} = require("webpack-manifest-plugin");
const postcssNormalize = require("postcss-normalize");
const paths = require("./paths");
const { StonksWatcherWidget } = require("../widget.config");
const isDevelopment = process.env.NODE_ENV === "development";
const appPackageJson = require(paths.appPackageJson);
const imageInlineSizeLimit = parseInt(
process.env.IMAGE_INLINE_SIZE_LIMIT || "10000"
);
const getStyleLoaders = (cssOptions, preProcessor) => {
const loaders = [
isDevelopment && require.resolve("style-loader"),
!isDevelopment && {
loader: MiniCssExtractPlugin.loader,
// css is located in `static/css`, use '../../' to locate index.html folder
// in production `paths.publicUrlOrPath` can be a relative path
options: paths.publicUrlOrPath.startsWith(".")
? { publicPath: "../../" }
: {},
},
{
loader: require.resolve("css-loader"),
options: cssOptions,
},
{
loader: require.resolve("postcss-loader"),
options: {
postcssOptions: {
ident: "postcss",
plugins: [
require("postcss-flexbugs-fixes"),
require("postcss-preset-env")({
autoprefixer: {
flexbox: "no-2009",
},
stage: 3,
}),
postcssNormalize(),
],
},
sourceMap: true,
},
},
].filter(Boolean);
if (preProcessor)
|
return loaders;
};
module.exports = {
mode: isDevelopment ? "development" : "production",
bail: !isDevelopment,
devtool: isDevelopment ? "cheap-module-source-map" : "source-map",
devServer: {
contentBase: paths.appBuild,
port: 3002,
},
entry: paths.appIndexJs,
output: {
path: paths.appBuild,
pathinfo: isDevelopment,
filename: isDevelopment
? "static/js/[name].[contenthash:8].js"
: "static/js/[name].js",
chunkFilename: isDevelopment
? "static/js/[name].[contenthash:8].chunk.js"
: "static/js/[name].chunk.js",
publicPath: paths.publicUrlOrPath,
chunkLoadingGlobal: `webpackJsonp__${appPackageJson.name
.replace("@", "")
.replace("/", "_")}`,
globalObject: "this",
devtoolModuleFilenameTemplate: isDevelopment
? (info) => path.resolve(info.absoluteResourcePath).replace(/\\/g, "/")
: (info) =>
path
.relative(paths.appSrc, info.absoluteResourcePath)
.replace(/\\/g, "/"),
},
optimization: {
minimize: !isDevelopment,
minimizer: [
new TerserPlugin({
terserOptions: {
parse: {
ecma: 8,
},
compress: {
ecma: 5,
warnings: false,
comparisons: false,
inline: 2,
},
mangle: {
safari10: true,
},
keep_classnames: !isDevelopment,
keep_fnames: !isDevelopment,
output: {
ecma: 5,
comments: false,
ascii_only: true,
},
},
}),
],
splitChunks: {
chunks: "all",
name: false,
},
runtimeChunk: false,
},
resolve: {
extensions: paths.moduleFileExtensions
.map((ext) => `.${ext}`)
.filter((ext) => true || !ext.includes("ts")),
alias: {},
fallback: {
module: false,
dgram: false,
dns: false,
fs: false,
http2: false,
net: false,
tls: false,
child_process: false,
},
},
module: {
strictExportPresence: true,
rules: [
// Disable require.ensure as it's not a standard language feature.
{ parser: { requireEnsure: false } },
{
// "oneOf" will traverse all following loaders until one will
// match the requirements. When no loader matches it will fall
// back to the "file" loader at the end of the loader list.
oneOf: [
{
test: [/\.avif$/],
loader: require.resolve("url-loader"),
options: {
limit: imageInlineSizeLimit,
mimetype: "image/avif",
name: "static/media/[name].[hash:8].[ext]",
},
},
{
test: [/\.bmp$/, /\.gif$/, /\.jpe?g$/, /\.png$/],
loader: require.resolve("url-loader"),
options: {
limit: imageInlineSizeLimit,
name: "static/media/[name].[hash:8].[ext]",
},
},
// Process application JS with Babel.
// The preset includes JSX, Flow, TypeScript, and some ESnext features.
{
test: /\.(js|mjs|jsx|ts|tsx)$/,
include: paths.appSrc,
loader: require.resolve("babel-loader"),
options: {
// This is a feature of `babel-loader` for webpack (not Babel itself).
// It enables caching results in ./node_modules/.cache/babel-loader/
// directory for faster rebuilds.
cacheDirectory: true,
// See #6846 for context on why cacheCompression is disabled
cacheCompression: false,
compact: !isDevelopment,
},
},
// Process any JS outside of the app with Babel.
// Unlike the application JS, we only compile the standard ES features.
{
test: /\.(js|mjs)$/,
exclude: /@babel(?:\/|\\{1,2})runtime/,
loader: require.resolve("babel-loader"),
options: {
babelrc: false,
configFile: false,
compact: false,
presets: [],
cacheDirectory: true,
cacheCompression: false,
// Babel sourcemaps are needed for debugging into node_modules
// code. Without the options below, debuggers like VSCode
// show incorrect code and set breakpoints on the wrong lines.
sourceMaps: !isDevelopment,
inputSourceMap: !isDevelopment,
},
},
{
test: /\.css$/,
use: getStyleLoaders({
importLoaders: 1,
sourceMap: true,
}),
sideEffects: true,
},
{
test: /\.(scss|sass)$/,
exclude: /\.module\.(scss|sass)$/,
use: getStyleLoaders(
{
importLoaders: 3,
sourceMap: true,
},
"sass-loader"
),
sideEffects: true,
},
{
test: /\.module\.(scss|sass)$/,
use: getStyleLoaders(
{
importLoaders: 3,
sourceMap: true,
modules: {
localIdentName: `${StonksWatcherWidget.id}--[local]--[hash:base64:5]`,
},
},
"sass-loader"
),
sideEffects: true,
},
          // "file" loader below catches any assets that fall through the other loaders.
{
loader: require.resolve("file-loader"),
// Exclude `js` files to keep "css" loader working as it injects
// its runtime that would otherwise be processed through "file" loader.
// Also exclude `html` and `json` extensions so they get processed
            // by webpack's internal loaders.
exclude: [/\.(js|mjs|jsx|ts|tsx)$/, /\.html$/, /\.json$/],
options: {
name: "static/media/[name].[hash:8].[ext]",
},
},
// ** STOP ** Are you adding a new loader?
// Make sure to add the new loader(s) before the "file" loader.
],
},
],
},
plugins: [
new ModuleFederationPlugin({
name: StonksWatcherWidget.id,
library: {
type: "var",
name: StonksWatcherWidget.id,
},
filename: "remoteEntry.js",
remotes: {},
exposes: {
"./": "./src/index",
},
shared: {},
}),
// new HtmlWebpackPlugin(
// Object.assign(
// {},
// {
// inject: true,
// template: paths.appHtml,
// },
// !isDevelopment
// ? {
// minify: {
// removeComments: true,
// collapseWhitespace: true,
// removeRedundantAttributes: true,
// useShortDoctype: true,
// removeEmptyAttributes: true,
// removeStyleLinkTypeAttributes: true,
// keepClosingSlash: true,
// minifyJS: true,
// minifyCSS: true,
// minifyURLs: true,
// },
// }
// : undefined
// )
|
{
loaders.push(
{
loader: require.resolve("resolve-url-loader"),
options: {
sourceMap: true,
root: paths.appSrc,
},
},
{
loader: require.resolve(preProcessor),
options: {
sourceMap: true,
},
}
);
}
|
conditional_block
|
retrieval.py
|
chunk_bytes: int = CSV_CHUNK_BYTES) -> str:
"""Download file as stream checking filesize and retrying (if able)"""
for _ in range(reps):
# stream from source to avoid MemoryError for very large (>10Gb) files
fd, local_filename = tempfile.mkstemp(dir=tempdir)
with requests.get(url, headers=headers, stream=True) as r:
r.raise_for_status()
# check if filesize reported and validate download if possible
expected_size = int(r.headers["content-length"]
if "content-length" in r.headers.keys() else 0)
logger.info(f"Starting file download, expected size: {expected_size}")
with os.fdopen(fd, 'wb') as f:
for chunk in r.iter_content(chunk_size=chunk_bytes):
if chunk:
f.write(chunk)
f.flush()
# confirm download completed successfully
received_size = os.path.getsize(local_filename)
if expected_size == 0 or received_size >= expected_size:
return local_filename
logger.info(f"File download incomplete (expected {expected_size} got {received_size})")
logger.info(f"Sleeping for {sleeptime} secs...")
os.remove(local_filename)
time.sleep(sleeptime)
raise requests.exceptions.RequestException("File download failed.")
def new_file_with_header(header):
'''Initialise a new temp file with the given header line'''
fd, file_name = tempfile.mkstemp()
with os.fdopen(fd, "w") as file:
file.writelines(header)
file.close()
return file_name
def sort_file_preserve_header(out_filename, in_filename):
'''Sort input file to output file, preserving the header'''
with open(in_filename, "r") as infile:
header = infile.readline()
with open(out_filename, "w") as outfile:
outfile.writelines(header)
with open(out_filename, "a") as outfile:
body = subprocess.Popen(('tail', '--lines', '+2', in_filename),
stdout=subprocess.PIPE)
        subprocess.run(("sort",), stdin=body.stdout, stdout=outfile)
body.wait()
def find_source_name_in_ingestion_queue(
source_name: str | None,
env: str) -> bool:
"""Check for running or queued batch processes with source_name
Already running (or queued) processes could compromise the delta-ingestion
processes
"""
# snapshot ingestion-queue for active processes
if source_name:
logger.info("Deltas: Snapshot batch processes")
batch_client = boto3.client("batch")
jobs: List[Dict] = []
for jobStatus in IN_PROGRESS_STATUS:
r = batch_client.list_jobs(
jobQueue='ingestion-queue',
jobStatus=jobStatus)
jobs.extend(r['jobSummaryList'])
logger.info(jobs)
# Be careful here - names are not always immediately obvious:
# e.g. 'ch_zurich-zurich-ingestor-prod'
# 'brazil_srag-srag-ingestor-prod'
# workaround: check variations in naming
if list(filter(lambda x: x['jobName'].startswith(
f'{source_name}-{source_name}-ingestor'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
if list(filter(lambda x: x['jobName'].endswith(
f'-{source_name}-ingestor-{env}'), jobs)):
|
if list(filter(lambda x: x['jobName'].startswith(
f'{source_name}-'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
return False
def generate_deltas(env: str, latest_filename: str, uploads: List[dict],
s3_bucket: str, source_id: str, source_format: str,
sort_sources: bool = True,
bulk_ingest_on_reject: bool = True,
) -> Tuple[str | None, str | None]:
"""Check last valid ingestion and return the filenames of ADD/DEL deltas
:param latest_filename: Filename of latest source line list from country (local copy)
:param uploads: List of uploads history for this source
:param s3_bucket: S3 bucket used to store retrieved line lists and deltas
:param source_id: UUID for the upload ingestor
:param source_format: Format of source file ('CSV', 'JSON', 'XLSX',...)
:param sort_sources: Should sources be sorted before computing deltas. This
is initially slower, but can drastically reduce the number of lines
added and removed following difference determination (recommended).
:param bulk_ingest_on_reject: Should we revert to bulk ingestion if the
most recent delta ingestion failed?
    'delta' refers to the difference between the current full upload and the
    full upload at the previous successful ingestion, whether that previous
    ingestion was a 'bulk' upload (overwriting all line list content) or a
    delta update. As such the 'current' full source file is always uploaded,
    whether delta files are generated or not.
return: (deltas_add_file_name, deltas_del_file_name)
Both, either or neither of these can be None, signifying no deltas,
or a processing issue which defaults to bulk ingestion
"""
logger.info("Deltas: Attempting to generate ingestion deltas file...")
reject_deltas = None, None
if source_format != 'CSV':
logger.info(f"Deltas: upsupported filetype ({source_format}) for deltas generation")
return reject_deltas
# Check for an uploads history before attempting to process
if not uploads:
return reject_deltas
    # Check that no source_id-relevant processes are queued or running
source_name = source_id
if find_source_name_in_ingestion_queue(source_name, env):
return reject_deltas
# identify last successful ingestion source
uploads.sort(key=lambda x: x["created"], reverse=False) # most recent last
if not (last_successful_ingest_list := list(filter(
lambda x: x['status'] == 'SUCCESS', uploads))):
logger.info("Deltas: No previous successful ingestions found.")
return reject_deltas
last_successful_ingest = last_successful_ingest_list[-1]
d = parse_datetime(last_successful_ingest['created'])
# identify last successful 'bulk' ingestion
if not (bulk_ingestion := list(filter(
lambda x: (x['status'] == 'SUCCESS')
and (('deltas' not in x) or (x['deltas'] is None)),
uploads))):
logger.info("Deltas: Cannot identify last successful bulk upload")
return reject_deltas
# check that no rejected deltas exist after the last successful bulk upload
# as this would desynchronise the database; if so, revert to bulk ingestion
# this time around.
# Note: This is necessary since Add and Del deltas are given different upload
# id's so that both are processed during pruning. A failure in one (but not
    # the other) would desynchronise the database from their associated
# retrieval sources.
if bulk_ingest_on_reject and list(filter(
lambda x: ('deltas' in x) and x['deltas']
and ('accepted' in x) and not x['accepted'],
uploads[uploads.index(bulk_ingestion[0]) + 1:])):
logger.info("Deltas: rejected deltas identified in upload history, "
"abandoning deltas generation")
return reject_deltas
# retrieve last good ingestion source
_, last_ingested_file_name = tempfile.mkstemp()
s3_key = f"{source_id}{d.strftime(TIME_FILEPART_FORMAT)}content.csv"
logger.info(f"Deltas: Identified last good ingestion source at: {s3_bucket}/{s3_key}")
s3_client.download_file(s3_bucket, s3_key, last_ingested_file_name)
logger.info(f"Deltas: Retrieved last good ingestion source: {last_ingested_file_name}")
# confirm that reference (previously ingested file) and latest headers match
with open(last_ingested_file_name, "r") as last_ingested_file:
last_ingested_header = last_ingested_file.readline()
    with open(latest_filename, "r") as latest_file:
        latest_header = latest_file.readline()
if latest_header != last_ingested_header:
logger.info("Deltas: Headers do not match - abandoning deltas")
return reject_deltas
# generate deltas files (additions and removals) with correct headers
try:
if sort_sources:
logger.info("Deltas: Sorting source files (initially slower but "
"produces fewer deltas)")
# sort the source files - this is slower but produces fewer deltas
_, early_file_name = tempfile.mkstemp()
sort_file_preserve_header(early_file_name, last_ingested_file_name)
logger.info("Deltas: Sorted file for last successful ingestion: "
f"{early_file_name}")
_, later_file_name = tempfile.mkstemp()
sort_file_preserve_header(later_file_name, latest_filename)
logger.info("Deltas: Sorted file for latest source file: "
f"{later_file_name}")
else:
early_file_name = last
|
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
|
conditional_block
|
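The download_file_stream helper in the record above streams the response to a temporary file, compares the bytes received against the reported content-length, and retries after a sleep when the download looks truncated. The following is a minimal, self-contained sketch of that pattern for reference; the function name fetch_with_size_check and its defaults are illustrative, not part of the original module, and error handling is simplified.

# Minimal sketch of the stream-download-with-size-check pattern used by
# download_file_stream above. Name and defaults are illustrative.
import os
import tempfile
import time

import requests


def fetch_with_size_check(url, retries=3, sleep_secs=5, chunk_bytes=1 << 20):
    for _ in range(retries):
        fd, path = tempfile.mkstemp()
        with requests.get(url, stream=True) as resp:
            resp.raise_for_status()
            expected = int(resp.headers.get("content-length", 0))
            with os.fdopen(fd, "wb") as out:
                for chunk in resp.iter_content(chunk_size=chunk_bytes):
                    if chunk:
                        out.write(chunk)
        received = os.path.getsize(path)
        if expected == 0 or received >= expected:
            return path  # size unknown, or download looks complete
        os.remove(path)  # truncated download: discard, wait, retry
        time.sleep(sleep_secs)
    raise requests.exceptions.RequestException("File download failed.")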
retrieval.py
|
_id}{d.strftime(TIME_FILEPART_FORMAT)}content.csv"
logger.info(f"Deltas: Identified last good ingestion source at: {s3_bucket}/{s3_key}")
s3_client.download_file(s3_bucket, s3_key, last_ingested_file_name)
logger.info(f"Deltas: Retrieved last good ingestion source: {last_ingested_file_name}")
# confirm that reference (previously ingested file) and latest headers match
with open(last_ingested_file_name, "r") as last_ingested_file:
last_ingested_header = last_ingested_file.readline()
    with open(latest_filename, "r") as latest_file:
        latest_header = latest_file.readline()
if latest_header != last_ingested_header:
logger.info("Deltas: Headers do not match - abandoning deltas")
return reject_deltas
# generate deltas files (additions and removals) with correct headers
try:
if sort_sources:
logger.info("Deltas: Sorting source files (initially slower but "
"produces fewer deltas)")
# sort the source files - this is slower but produces fewer deltas
_, early_file_name = tempfile.mkstemp()
sort_file_preserve_header(early_file_name, last_ingested_file_name)
logger.info("Deltas: Sorted file for last successful ingestion: "
f"{early_file_name}")
_, later_file_name = tempfile.mkstemp()
sort_file_preserve_header(later_file_name, latest_filename)
logger.info("Deltas: Sorted file for latest source file: "
f"{later_file_name}")
else:
early_file_name = last_ingested_file_name
later_file_name = latest_filename
# 'comm' command has an annoying incompatibility between linux and mac
nocheck_flag = ["--nocheck-order"] # linux requires this if not sorted
if platform == "darwin": # but mac does not support the flag
nocheck_flag = []
# generate additions file (or return filename: None)
deltas_add_file_name = new_file_with_header(latest_header)
deltas_add_file = open(deltas_add_file_name, "a")
initial_file_size = deltas_add_file.tell()
if subprocess.run(["/usr/bin/env",
"comm",
"-13", # Suppress unique lines from file1 and common
early_file_name,
later_file_name
] + nocheck_flag,
stdout=deltas_add_file).returncode > 0:
logger.error("Deltas: second comm command returned an error code")
return reject_deltas
if deltas_add_file.tell() == initial_file_size:
deltas_add_file_name = None
deltas_add_file.close()
# generate removals file (or return filename: None)
deltas_del_file_name = new_file_with_header(latest_header)
deltas_del_file = open(deltas_del_file_name, "a")
initial_file_size = deltas_del_file.tell()
if subprocess.run(["/usr/bin/env",
"comm",
"-23", # Suppress unique lines from file2 and common
early_file_name,
later_file_name
] + nocheck_flag,
stdout=deltas_del_file).returncode > 0:
logger.error("Deltas: first comm command returned an error code")
return reject_deltas
if deltas_del_file.tell() == initial_file_size:
deltas_del_file_name = None
deltas_del_file.close()
except subprocess.CalledProcessError as e:
logger.error(f"Deltas: Process error during call to comm command: {e}")
return reject_deltas
# finally, check that the deltas aren't replacing most of the source file,
    # in which case it is better to simply re-ingest the full source and reset
# delta tracking (remembering that Del deltas accumulate records in the DB)
if deltas_del_file_name:
if (os.path.getsize(deltas_del_file_name)
> (0.5 * os.path.getsize(last_ingested_file_name))):
return reject_deltas
return deltas_add_file_name, deltas_del_file_name
def parse_datetime(date_str: str) -> datetime:
"""Isolate functionality to facilitate easier mocking"""
return dateutil.parser.parse(date_str)
def retrieve_content(env, source_id, upload_id, url, source_format,
api_headers, cookies, chunk_bytes=CSV_CHUNK_BYTES,
tempdir=TEMP_PATH, uploads_history={},
bucket=OUTPUT_BUCKET):
""" Retrieves and locally persists the content at the provided URL. """
try:
if (source_format != "JSON"
and source_format != "CSV"
and source_format != "XLSX"):
e = ValueError(f"Unsupported source format: {source_format}")
common_lib.complete_with_error(
e, env, common_lib.UploadError.SOURCE_CONFIGURATION_ERROR,
source_id, upload_id, api_headers, cookies)
logger.info(f"Downloading {source_format} content from {url}")
if url.startswith("s3://"):
# strip the prefix
s3Location = url[5:]
# split at the first /
[s3Bucket, s3Key] = s3Location.split('/', 1)
# get it!
_, local_filename = tempfile.mkstemp(dir=tempdir)
s3_client.download_file(s3Bucket, s3Key, local_filename)
else:
headers = {"user-agent": "GHDSI/1.0 (https://global.health)"}
local_filename = download_file_stream(url, headers, tempdir)
logger.info("Download finished")
# Match upload s3 key (bucket folder) to upload timestamp (if available)
try:
today = parse_datetime(
list(filter(lambda x: x['_id'] == upload_id,
uploads_history))[-1]['created'])
except (IndexError, TypeError, KeyError) as e:
logger.error(f"Error retrieving file upload datetime stamp: {e}")
today = datetime.now(timezone.utc)
key_filename_part = f"content.{source_format.lower()}"
s3_object_key = (
f"{source_id}"
f"{today.strftime(TIME_FILEPART_FORMAT)}"
f"{key_filename_part}"
)
# Make the encoding of retrieved content consistent (UTF-8) for all
# parsers as per https://github.com/globaldothealth/list/issues/867.
bytes_filename = raw_content_fileconvert(url, local_filename, tempdir)
logging.info(f"Filename after conversion: {bytes_filename}")
if source_format == "XLSX":
# do not convert XLSX into another encoding, leave for parsers
logger.warning("Skipping encoding detection for XLSX")
outfile = bytes_filename
else:
logger.info("Detecting encoding of retrieved content")
# Read 2MB to be quite sure about the encoding.
bytesio = open(bytes_filename, "rb")
detected_enc = detect(bytesio.read(2 << 20))
bytesio.seek(0)
if detected_enc["encoding"]:
logger.info(f"Source encoding is presumably {detected_enc}")
else:
detected_enc["encoding"] = DEFAULT_ENCODING
logger.warning(f"Source encoding detection failed, setting to {DEFAULT_ENCODING}")
fd, outfile_name = tempfile.mkstemp(dir=tempdir)
with os.fdopen(fd, "w", encoding="utf-8") as outfile:
text_stream = codecs.getreader(detected_enc["encoding"])(bytesio)
# Write the output file as utf-8 in chunks because decoding the
# whole data in one shot becomes really slow with big files.
content = text_stream.read(READ_CHUNK_BYTES)
while content:
outfile.write(content)
content = text_stream.read(READ_CHUNK_BYTES)
# always return full source file (but don't parse if deltas generated)
return_list = [(outfile_name, s3_object_key, {})]
# attempt to generate deltas files
deltas_add_file_name, deltas_del_file_name = generate_deltas(
env,
outfile_name,
uploads_history,
bucket,
source_id,
source_format,
sort_sources=True
)
if deltas_add_file_name:
s3_deltas_add_object_key = (
f"{source_id}"
f"{today.strftime(TIME_FILEPART_FORMAT)}"
f"deltasAdd.{source_format.lower()}"
)
logger.info(f"Delta file (ADD): f{deltas_add_file_name}")
return_list[0][2]['parseit'] = False # Turn off bulk upload parsing
return_list.append((deltas_add_file_name,
s3_deltas_add_object_key,
{'deltas': "Add"}))
if deltas_del_file_name:
s3_deltas_del_object_key = (
f"{source_id}"
f"{today.strftime(TIME_FILEPART_FORMAT)}"
f"deltasDel.{source_format.lower()}"
)
logger.info(f"Delta file (DEL): {deltas_del_file_name}")
return_list[0][2]['parseit'] = False # Turn off bulk upload parsing
return_list.append((deltas_del_file_name,
s3_deltas_del_object_key,
{'deltas': "Del"}))
return return_list
except requests.exceptions.RequestException as e:
upload_error = (
common_lib.UploadError.SOURCE_CONTENT_NOT_FOUND
if e.response.status_code == 404 else
common_lib.UploadError.SOURCE_CONTENT_DOWNLOAD_ERROR)
common_lib.complete_with_error(
|
e, env, upload_error, source_id, upload_id,
api_headers, cookies)
|
random_line_split
|
|
retrieval.py
|
: Snapshot batch processes")
batch_client = boto3.client("batch")
jobs: List[Dict] = []
for jobStatus in IN_PROGRESS_STATUS:
r = batch_client.list_jobs(
jobQueue='ingestion-queue',
jobStatus=jobStatus)
jobs.extend(r['jobSummaryList'])
logger.info(jobs)
# Be careful here - names are not always immediately obvious:
# e.g. 'ch_zurich-zurich-ingestor-prod'
# 'brazil_srag-srag-ingestor-prod'
# workaround: check variations in naming
if list(filter(lambda x: x['jobName'].startswith(
f'{source_name}-{source_name}-ingestor'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
if list(filter(lambda x: x['jobName'].endswith(
f'-{source_name}-ingestor-{env}'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
if list(filter(lambda x: x['jobName'].startswith(
f'{source_name}-'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
return False
def generate_deltas(env: str, latest_filename: str, uploads: List[dict],
s3_bucket: str, source_id: str, source_format: str,
sort_sources: bool = True,
bulk_ingest_on_reject: bool = True,
) -> Tuple[str | None, str | None]:
"""Check last valid ingestion and return the filenames of ADD/DEL deltas
:param latest_filename: Filename of latest source line list from country (local copy)
:param uploads: List of uploads history for this source
:param s3_bucket: S3 bucket used to store retrieved line lists and deltas
:param source_id: UUID for the upload ingestor
:param source_format: Format of source file ('CSV', 'JSON', 'XLSX',...)
:param sort_sources: Should sources be sorted before computing deltas. This
is initially slower, but can drastically reduce the number of lines
added and removed following difference determination (recommended).
:param bulk_ingest_on_reject: Should we revert to bulk ingestion if the
most recent delta ingestion failed?
    'delta' refers to the difference between the current full upload and the
    full upload at the previous successful ingestion, whether that previous
    ingestion was a 'bulk' upload (overwriting all line list content) or a
    delta update. As such the 'current' full source file is always uploaded,
    whether delta files are generated or not.
return: (deltas_add_file_name, deltas_del_file_name)
Both, either or neither of these can be None, signifying no deltas,
or a processing issue which defaults to bulk ingestion
"""
logger.info("Deltas: Attempting to generate ingestion deltas file...")
reject_deltas = None, None
if source_format != 'CSV':
logger.info(f"Deltas: upsupported filetype ({source_format}) for deltas generation")
return reject_deltas
# Check for an uploads history before attempting to process
if not uploads:
return reject_deltas
    # Check that no source_id-relevant processes are queued or running
source_name = source_id
if find_source_name_in_ingestion_queue(source_name, env):
return reject_deltas
# identify last successful ingestion source
uploads.sort(key=lambda x: x["created"], reverse=False) # most recent last
if not (last_successful_ingest_list := list(filter(
lambda x: x['status'] == 'SUCCESS', uploads))):
logger.info("Deltas: No previous successful ingestions found.")
return reject_deltas
last_successful_ingest = last_successful_ingest_list[-1]
d = parse_datetime(last_successful_ingest['created'])
# identify last successful 'bulk' ingestion
if not (bulk_ingestion := list(filter(
lambda x: (x['status'] == 'SUCCESS')
and (('deltas' not in x) or (x['deltas'] is None)),
uploads))):
logger.info("Deltas: Cannot identify last successful bulk upload")
return reject_deltas
# check that no rejected deltas exist after the last successful bulk upload
# as this would desynchronise the database; if so, revert to bulk ingestion
# this time around.
# Note: This is necessary since Add and Del deltas are given different upload
# id's so that both are processed during pruning. A failure in one (but not
    # the other) would desynchronise the database from their associated
# retrieval sources.
if bulk_ingest_on_reject and list(filter(
lambda x: ('deltas' in x) and x['deltas']
and ('accepted' in x) and not x['accepted'],
uploads[uploads.index(bulk_ingestion[0]) + 1:])):
logger.info("Deltas: rejected deltas identified in upload history, "
"abandoning deltas generation")
return reject_deltas
# retrieve last good ingestion source
_, last_ingested_file_name = tempfile.mkstemp()
s3_key = f"{source_id}{d.strftime(TIME_FILEPART_FORMAT)}content.csv"
logger.info(f"Deltas: Identified last good ingestion source at: {s3_bucket}/{s3_key}")
s3_client.download_file(s3_bucket, s3_key, last_ingested_file_name)
logger.info(f"Deltas: Retrieved last good ingestion source: {last_ingested_file_name}")
# confirm that reference (previously ingested file) and latest headers match
with open(last_ingested_file_name, "r") as last_ingested_file:
last_ingested_header = last_ingested_file.readline()
    with open(latest_filename, "r") as latest_file:
        latest_header = latest_file.readline()
if latest_header != last_ingested_header:
logger.info("Deltas: Headers do not match - abandoning deltas")
return reject_deltas
# generate deltas files (additions and removals) with correct headers
try:
if sort_sources:
logger.info("Deltas: Sorting source files (initially slower but "
"produces fewer deltas)")
# sort the source files - this is slower but produces fewer deltas
_, early_file_name = tempfile.mkstemp()
sort_file_preserve_header(early_file_name, last_ingested_file_name)
logger.info("Deltas: Sorted file for last successful ingestion: "
f"{early_file_name}")
_, later_file_name = tempfile.mkstemp()
sort_file_preserve_header(later_file_name, latest_filename)
logger.info("Deltas: Sorted file for latest source file: "
f"{later_file_name}")
else:
early_file_name = last_ingested_file_name
later_file_name = latest_filename
# 'comm' command has an annoying incompatibility between linux and mac
nocheck_flag = ["--nocheck-order"] # linux requires this if not sorted
if platform == "darwin": # but mac does not support the flag
nocheck_flag = []
# generate additions file (or return filename: None)
deltas_add_file_name = new_file_with_header(latest_header)
deltas_add_file = open(deltas_add_file_name, "a")
initial_file_size = deltas_add_file.tell()
if subprocess.run(["/usr/bin/env",
"comm",
"-13", # Suppress unique lines from file1 and common
early_file_name,
later_file_name
] + nocheck_flag,
stdout=deltas_add_file).returncode > 0:
logger.error("Deltas: second comm command returned an error code")
return reject_deltas
if deltas_add_file.tell() == initial_file_size:
deltas_add_file_name = None
deltas_add_file.close()
# generate removals file (or return filename: None)
deltas_del_file_name = new_file_with_header(latest_header)
deltas_del_file = open(deltas_del_file_name, "a")
initial_file_size = deltas_del_file.tell()
if subprocess.run(["/usr/bin/env",
"comm",
"-23", # Suppress unique lines from file2 and common
early_file_name,
later_file_name
] + nocheck_flag,
stdout=deltas_del_file).returncode > 0:
logger.error("Deltas: first comm command returned an error code")
return reject_deltas
if deltas_del_file.tell() == initial_file_size:
deltas_del_file_name = None
deltas_del_file.close()
except subprocess.CalledProcessError as e:
logger.error(f"Deltas: Process error during call to comm command: {e}")
return reject_deltas
# finally, check that the deltas aren't replacing most of the source file,
    # in which case it is better to simply re-ingest the full source and reset
# delta tracking (remembering that Del deltas accumulate records in the DB)
if deltas_del_file_name:
if (os.path.getsize(deltas_del_file_name)
> (0.5 * os.path.getsize(last_ingested_file_name))):
return reject_deltas
return deltas_add_file_name, deltas_del_file_name
def
|
parse_datetime
|
identifier_name
|
|
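find_source_name_in_ingestion_queue in the record above blocks delta generation while any related Batch job is queued or running, and because job names are not uniform it checks three naming variations. A small isolated sketch of that matching logic follows; the helper name job_relates_to_source is illustrative, while the patterns mirror the filters used above.

# Sketch of the three job-name variations checked against the ingestion
# queue. Helper name is illustrative; patterns mirror the filters above.
def job_relates_to_source(job_name, source_name, env):
    return (job_name.startswith(f"{source_name}-{source_name}-ingestor")
            or job_name.endswith(f"-{source_name}-ingestor-{env}")
            or job_name.startswith(f"{source_name}-"))


jobs = ["ch_zurich-zurich-ingestor-prod", "brazil_srag-srag-ingestor-prod"]
# 'ch_zurich-zurich-ingestor-prod' is caught only by the third pattern.
assert any(job_relates_to_source(j, "ch_zurich", "prod") for j in jobs)
assert not any(job_relates_to_source(j, "mexico", "prod") for j in jobs)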
retrieval.py
|
jobStatus in IN_PROGRESS_STATUS:
r = batch_client.list_jobs(
jobQueue='ingestion-queue',
jobStatus=jobStatus)
jobs.extend(r['jobSummaryList'])
logger.info(jobs)
# Be careful here - names are not always immediately obvious:
# e.g. 'ch_zurich-zurich-ingestor-prod'
# 'brazil_srag-srag-ingestor-prod'
# workaround: check variations in naming
if list(filter(lambda x: x['jobName'].startswith(
f'{source_name}-{source_name}-ingestor'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
if list(filter(lambda x: x['jobName'].endswith(
f'-{source_name}-ingestor-{env}'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
if list(filter(lambda x: x['jobName'].startswith(
f'{source_name}-'), jobs)):
logger.info("Deltas: Ongoing batch jobs relating to source found. "
"Abandoning deltas generation.")
return True
return False
def generate_deltas(env: str, latest_filename: str, uploads: List[dict],
s3_bucket: str, source_id: str, source_format: str,
sort_sources: bool = True,
bulk_ingest_on_reject: bool = True,
) -> Tuple[str | None, str | None]:
"""Check last valid ingestion and return the filenames of ADD/DEL deltas
:param latest_filename: Filename of latest source line list from country (local copy)
:param uploads: List of uploads history for this source
:param s3_bucket: S3 bucket used to store retrieved line lists and deltas
:param source_id: UUID for the upload ingestor
:param source_format: Format of source file ('CSV', 'JSON', 'XLSX',...)
:param sort_sources: Should sources be sorted before computing deltas. This
is initially slower, but can drastically reduce the number of lines
added and removed following difference determination (recommended).
:param bulk_ingest_on_reject: Should we revert to bulk ingestion if the
most recent delta ingestion failed?
    'delta' refers to the difference between the current full upload and the
    full upload at the previous successful ingestion, whether that previous
    ingestion was a 'bulk' upload (overwriting all line list content) or a
    delta update. As such the 'current' full source file is always uploaded,
    whether delta files are generated or not.
return: (deltas_add_file_name, deltas_del_file_name)
Both, either or neither of these can be None, signifying no deltas,
or a processing issue which defaults to bulk ingestion
"""
logger.info("Deltas: Attempting to generate ingestion deltas file...")
reject_deltas = None, None
if source_format != 'CSV':
logger.info(f"Deltas: upsupported filetype ({source_format}) for deltas generation")
return reject_deltas
# Check for an uploads history before attempting to process
if not uploads:
return reject_deltas
    # Check that no source_id-relevant processes are queued or running
source_name = source_id
if find_source_name_in_ingestion_queue(source_name, env):
return reject_deltas
# identify last successful ingestion source
uploads.sort(key=lambda x: x["created"], reverse=False) # most recent last
if not (last_successful_ingest_list := list(filter(
lambda x: x['status'] == 'SUCCESS', uploads))):
logger.info("Deltas: No previous successful ingestions found.")
return reject_deltas
last_successful_ingest = last_successful_ingest_list[-1]
d = parse_datetime(last_successful_ingest['created'])
# identify last successful 'bulk' ingestion
if not (bulk_ingestion := list(filter(
lambda x: (x['status'] == 'SUCCESS')
and (('deltas' not in x) or (x['deltas'] is None)),
uploads))):
logger.info("Deltas: Cannot identify last successful bulk upload")
return reject_deltas
# check that no rejected deltas exist after the last successful bulk upload
# as this would desynchronise the database; if so, revert to bulk ingestion
# this time around.
# Note: This is necessary since Add and Del deltas are given different upload
# id's so that both are processed during pruning. A failure in one (but not
    # the other) would desynchronise the database from their associated
# retrieval sources.
if bulk_ingest_on_reject and list(filter(
lambda x: ('deltas' in x) and x['deltas']
and ('accepted' in x) and not x['accepted'],
uploads[uploads.index(bulk_ingestion[0]) + 1:])):
logger.info("Deltas: rejected deltas identified in upload history, "
"abandoning deltas generation")
return reject_deltas
# retrieve last good ingestion source
_, last_ingested_file_name = tempfile.mkstemp()
s3_key = f"{source_id}{d.strftime(TIME_FILEPART_FORMAT)}content.csv"
logger.info(f"Deltas: Identified last good ingestion source at: {s3_bucket}/{s3_key}")
s3_client.download_file(s3_bucket, s3_key, last_ingested_file_name)
logger.info(f"Deltas: Retrieved last good ingestion source: {last_ingested_file_name}")
# confirm that reference (previously ingested file) and latest headers match
with open(last_ingested_file_name, "r") as last_ingested_file:
last_ingested_header = last_ingested_file.readline()
    with open(latest_filename, "r") as latest_file:
        latest_header = latest_file.readline()
if latest_header != last_ingested_header:
logger.info("Deltas: Headers do not match - abandoning deltas")
return reject_deltas
# generate deltas files (additions and removals) with correct headers
try:
if sort_sources:
logger.info("Deltas: Sorting source files (initially slower but "
"produces fewer deltas)")
# sort the source files - this is slower but produces fewer deltas
_, early_file_name = tempfile.mkstemp()
sort_file_preserve_header(early_file_name, last_ingested_file_name)
logger.info("Deltas: Sorted file for last successful ingestion: "
f"{early_file_name}")
_, later_file_name = tempfile.mkstemp()
sort_file_preserve_header(later_file_name, latest_filename)
logger.info("Deltas: Sorted file for latest source file: "
f"{later_file_name}")
else:
early_file_name = last_ingested_file_name
later_file_name = latest_filename
# 'comm' command has an annoying incompatibility between linux and mac
nocheck_flag = ["--nocheck-order"] # linux requires this if not sorted
if platform == "darwin": # but mac does not support the flag
nocheck_flag = []
# generate additions file (or return filename: None)
deltas_add_file_name = new_file_with_header(latest_header)
deltas_add_file = open(deltas_add_file_name, "a")
initial_file_size = deltas_add_file.tell()
if subprocess.run(["/usr/bin/env",
"comm",
"-13", # Suppress unique lines from file1 and common
early_file_name,
later_file_name
] + nocheck_flag,
stdout=deltas_add_file).returncode > 0:
logger.error("Deltas: second comm command returned an error code")
return reject_deltas
if deltas_add_file.tell() == initial_file_size:
deltas_add_file_name = None
deltas_add_file.close()
# generate removals file (or return filename: None)
deltas_del_file_name = new_file_with_header(latest_header)
deltas_del_file = open(deltas_del_file_name, "a")
initial_file_size = deltas_del_file.tell()
if subprocess.run(["/usr/bin/env",
"comm",
"-23", # Suppress unique lines from file2 and common
early_file_name,
later_file_name
] + nocheck_flag,
stdout=deltas_del_file).returncode > 0:
logger.error("Deltas: first comm command returned an error code")
return reject_deltas
if deltas_del_file.tell() == initial_file_size:
deltas_del_file_name = None
deltas_del_file.close()
except subprocess.CalledProcessError as e:
logger.error(f"Deltas: Process error during call to comm command: {e}")
return reject_deltas
# finally, check that the deltas aren't replacing most of the source file,
    # in which case it is better to simply re-ingest the full source and reset
# delta tracking (remembering that Del deltas accumulate records in the DB)
if deltas_del_file_name:
if (os.path.getsize(deltas_del_file_name)
> (0.5 * os.path.getsize(last_ingested_file_name))):
return reject_deltas
return deltas_add_file_name, deltas_del_file_name
def parse_datetime(date_str: str) -> datetime:
|
"""Isolate functionality to facilitate easier mocking"""
return dateutil.parser.parse(date_str)
|
identifier_body
|
|
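Before any files are compared, generate_deltas in the record above walks the uploads history to find the most recent successful upload (the comparison reference) and the most recent successful bulk upload (the point after which a rejected delta forces a fall-back to bulk ingestion). A condensed sketch of that selection follows, assuming each upload dict carries 'created', 'status' and an optional 'deltas' marker as in the code above; the helper name is illustrative.

# Condensed sketch of the reference-upload selection in generate_deltas.
# Assumes upload dicts with 'created', 'status' and an optional 'deltas'
# key, as in the code above. Helper name is illustrative.
def pick_reference_uploads(uploads):
    ordered = sorted(uploads, key=lambda u: u["created"])  # most recent last
    successes = [u for u in ordered if u["status"] == "SUCCESS"]
    if not successes:
        return None, None  # no previous success: bulk ingest this time
    bulks = [u for u in successes if u.get("deltas") is None]
    last_success = successes[-1]
    last_bulk = bulks[-1] if bulks else None
    return last_success, last_bulk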
wal.go
|
closed int32
}
// NewReader constructs a new Reader for reading from this WAL starting at the
// given offset. The returned Reader is NOT safe for use from multiple
// goroutines. Name is just a label for the reader used during logging.
func (wal *WAL) NewReader(name string, offset Offset, bufferSource func() []byte) (*Reader, error) {
r := &Reader{
filebased: filebased{
dir: wal.dir,
fileFlags: os.O_RDONLY,
h: newHash(),
log: golog.LoggerFor("wal." + name),
},
wal: wal,
bufferSource: bufferSource,
}
if offset != nil {
offsetString := sequenceToFilename(offset.FileSequence())
if offsetString[0] != '0' {
wal.log.Debugf("Converting legacy offset")
offset = NewOffset(offset.FileSequence()/1000, offset.Position())
}
files, err := ioutil.ReadDir(wal.dir)
if err != nil {
return nil, fmt.Errorf("Unable to list existing log files: %v", err)
}
cutoff := sequenceToFilename(offset.FileSequence())
for i, fileInfo := range files {
isMostRecent := i == len(files)-1
if fileInfo.Name() >= cutoff {
// Found existing or more recent WAL file
r.fileSequence = filenameToSequence(fileInfo.Name())
if r.fileSequence == offset.FileSequence() {
// Exact match, start at right position
r.position = offset.Position()
if r.position == fileInfo.Size() && !isMostRecent {
// At end of file and more recent is available, move to next
continue
}
} else {
// Newer WAL file, start at beginning
r.position = 0
}
openErr := r.open()
if openErr != nil {
return nil, fmt.Errorf("Unable to open existing log file at %v: %v", fileInfo.Name(), openErr)
}
break
}
}
}
if r.file == nil {
// Didn't find WAL file, advance
err := r.advance()
if err != nil {
return nil, fmt.Errorf("Unable to advance initially: %v", err)
}
wal.log.Debugf("Replaying log starting at %v", r.file.Name())
}
return r, nil
}
// Read reads the next chunk from the WAL, blocking until one is available.
func (r *Reader) Read() ([]byte, error) {
for {
length, err := r.readHeader()
if err != nil {
return nil, err
}
checksum, err := r.readHeader()
if err != nil {
return nil, err
}
if length > maxEntrySize {
fmt.Printf("Discarding wal entry of size %v exceeding %v, probably corrupted\n", humanize.Bytes(uint64(length)), humanize.Bytes(uint64(maxEntrySize)))
_, discardErr := io.CopyN(ioutil.Discard, r.reader, int64(length))
if discardErr == io.EOF {
discardErr = nil
}
return nil, discardErr
}
data, err := r.readData(length)
if data != nil || err != nil {
if data != nil {
r.h.Reset()
r.h.Write(data)
if checksum != int(r.h.Sum32()) {
r.log.Errorf("Checksum mismatch, skipping entry")
continue
}
}
return data, err
}
}
}
func (r *Reader) readHeader() (int, error) {
headBuf := make([]byte, 4)
top:
for {
length := 0
read := 0
for {
if atomic.LoadInt32(&r.stopped) == 1 {
return 0, io.EOF
}
if atomic.LoadInt32(&r.closed) == 1 {
return 0, io.ErrUnexpectedEOF
}
n, err := r.reader.Read(headBuf[read:])
read += n
r.position += int64(n)
if err != nil && err.Error() == "EOF" && read < 4 {
if r.wal.hasMovedBeyond(r.fileSequence) {
if read > 0 {
r.log.Errorf("Out of data to read after reading %d, and WAL has moved beyond %d. Assuming WAL at %v corrupted. Advancing and continuing.", r.position, r.fileSequence, r.filename())
}
advanceErr := r.advance()
if advanceErr != nil {
return 0, advanceErr
}
continue top
}
// No newer log files, continue trying to read from this one
time.Sleep(50 * time.Millisecond)
continue
}
if err != nil {
r.log.Errorf("Unexpected error reading header from WAL file %v: %v", r.filename(), err)
break
}
if read == 4 {
length = int(encoding.Uint32(headBuf))
break
}
}
if length > sentinel {
return length, nil
}
err := r.advance()
if err != nil {
return 0, err
}
}
}
func (r *Reader) readData(length int) ([]byte, error) {
buf := r.bufferSource()
// Grow buffer if necessary
if cap(buf) < length {
buf = make([]byte, length)
}
// Set buffer length
buf = buf[:length]
// Read into buffer
read := 0
for {
if atomic.LoadInt32(&r.stopped) == 1 {
return nil, io.EOF
}
if atomic.LoadInt32(&r.closed) == 1 {
return nil, io.ErrUnexpectedEOF
}
n, err := r.reader.Read(buf[read:])
read += n
r.position += int64(n)
if err != nil && err.Error() == "EOF" && read < length {
if r.wal.hasMovedBeyond(r.fileSequence) {
r.log.Errorf("Out of data to read after reading %d, and WAL has moved beyond %d. Assuming WAL at %v corrupted. Advancing and continuing.", r.position, r.fileSequence, r.filename())
advanceErr := r.advance()
if advanceErr != nil {
return nil, advanceErr
}
return nil, nil
}
// No newer log files, continue trying to read from this one
time.Sleep(50 * time.Millisecond)
continue
}
if err != nil {
r.log.Errorf("Unexpected error reading data from WAL file %v: %v", r.filename(), err)
return nil, nil
}
if read == length {
return buf, nil
}
}
}
// Offset returns the furthest Offset read by this Reader. It is NOT safe to
// call this concurrently with Read().
func (r *Reader) Offset() Offset {
return NewOffset(r.fileSequence, r.position)
}
// Stop stops this reader from advancing
func (r *Reader) Stop() {
atomic.StoreInt32(&r.stopped, 1)
}
// Close closes the Reader.
func (r *Reader) Close() error {
atomic.StoreInt32(&r.closed, 1)
return r.file.Close()
}
func (r *Reader) open() error {
err := r.openFile()
if err != nil {
return err
}
if r.compressed {
r.reader = snappy.NewReader(r.file)
} else {
r.reader = bufio.NewReaderSize(r.file, defaultFileBuffer)
}
if r.position > 0 {
// Read to the correct offset
// Note - we cannot just seek on the file because the data is compressed and
// the recorded position does not correspond to a file offset.
_, seekErr := io.CopyN(ioutil.Discard, r.reader, r.position)
if seekErr != nil {
return seekErr
}
}
return nil
}
func (r *Reader) advance() error {
r.log.Debugf("Advancing in %v", r.dir)
for {
if atomic.LoadInt32(&r.closed) == 1 {
return io.ErrUnexpectedEOF
}
files, err := ioutil.ReadDir(r.dir)
if err != nil {
return fmt.Errorf("Unable to list existing log files: %v", err)
}
cutoff := sequenceToFilename(r.fileSequence)
for _, fileInfo := range files {
seq := filenameToSequence(fileInfo.Name())
if seq == r.fileSequence {
// Duplicate WAL segment (i.e. compressed vs uncompressed), ignore
continue
}
if fileInfo.Name() > cutoff {
// Files are sorted by name, if we've gotten past the cutoff, don't bother
// continuing
r.position = 0
r.fileSequence = seq
return r.open()
}
}
time.Sleep(50 * time.Millisecond)
}
}
func newFileSequence() int64 {
return tsToFileSequence(time.Now())
}
func tsToFileSequence(ts time.Time) int64
|
{
return ts.UnixNano() / 1000
}
|
identifier_body
|
|
wal.go
|
.Close()
out, err := ioutil.TempFile("", "")
if err != nil {
return false, fmt.Errorf("Unable to open temp file to compress %v: %v", infile, err)
}
defer out.Close()
defer os.Remove(out.Name())
compressedOut := snappy.NewWriter(out)
_, err = io.Copy(compressedOut, bufio.NewReaderSize(in, defaultFileBuffer))
if err != nil {
return false, fmt.Errorf("Unable to compress %v: %v", infile, err)
}
err = compressedOut.Close()
if err != nil {
return false, fmt.Errorf("Unable to finalize compression of %v: %v", infile, err)
}
err = out.Close()
if err != nil {
return false, fmt.Errorf("Unable to close compressed output %v: %v", outfile, err)
}
err = os.Rename(out.Name(), outfile)
if err != nil {
return false, fmt.Errorf("Unable to move compressed output %v to final destination %v: %v", out.Name(), outfile, err)
}
err = os.Remove(infile)
if err != nil {
return false, fmt.Errorf("Unable to remove uncompressed file %v: %v", infile, err)
}
wal.log.Debugf("Compressed WAL file %v", infile)
return true, nil
}
func (wal *WAL) forEachSegment(cb func(file os.FileInfo, first bool, last bool) (bool, error)) error {
files, err := ioutil.ReadDir(wal.dir)
if err != nil {
return fmt.Errorf("Unable to list log segments: %v", err)
}
for i, file := range files {
more, err := cb(file, i == 0, i == len(files)-1)
if !more || err != nil {
return err
}
}
return nil
}
func (wal *WAL) forEachSegmentInReverse(cb func(file os.FileInfo, first bool, last bool) (bool, error)) error {
files, err := ioutil.ReadDir(wal.dir)
if err != nil {
return fmt.Errorf("Unable to list log segments: %v", err)
}
for i := len(files) - 1; i >= 0; i-- {
more, err := cb(files[i], i == 0, i == len(files)-1)
if !more || err != nil {
return err
}
}
return nil
}
// Close closes the wal, including flushing any unsaved writes.
func (wal *WAL) Close() (err error) {
wal.closeOnce.Do(func() {
select {
case <-wal.closed:
// already closed
return
default:
// continue
}
wal.log.Debug("Closing")
defer wal.log.Debug("Closed")
close(wal.closed)
if wal.backlog != nil {
close(wal.backlog)
<-wal.backlogFinished
}
wal.mx.Lock()
flushErr := wal.writer.Flush()
syncErr := wal.file.Sync()
wal.mx.Unlock()
closeErr := wal.file.Close()
if flushErr != nil {
err = flushErr
}
if syncErr != nil {
err = syncErr
}
		if closeErr != nil {
			err = closeErr
		}
})
return
}
func (wal *WAL) advance() error {
wal.fileSequence = newFileSequence()
wal.position = 0
err := wal.openFile()
if err == nil {
wal.writer = bufio.NewWriterSize(wal.file, defaultFileBuffer)
}
return err
}
func (wal *WAL) sync(syncInterval time.Duration) {
for {
time.Sleep(syncInterval)
select {
case <-wal.closed:
return
default:
wal.mx.Lock()
wal.doSync()
wal.mx.Unlock()
}
}
}
func (wal *WAL) doSync() {
err := wal.writer.Flush()
if err != nil {
wal.log.Errorf("Unable to flush wal: %v", err)
return
}
err = wal.file.Sync()
if err != nil {
wal.log.Errorf("Unable to sync wal: %v", err)
}
}
func (wal *WAL) hasMovedBeyond(fileSequence int64) bool {
wal.mx.RLock()
hasMovedBeyond := wal.fileSequence > fileSequence
wal.mx.RUnlock()
return hasMovedBeyond
}
// Reader allows reading from a WAL. It is NOT safe to read from a single Reader
// from multiple goroutines.
type Reader struct {
filebased
wal *WAL
reader io.Reader
bufferSource func() []byte
stopped int32
closed int32
}
// NewReader constructs a new Reader for reading from this WAL starting at the
// given offset. The returned Reader is NOT safe for use from multiple
// goroutines. Name is just a label for the reader used during logging.
func (wal *WAL) NewReader(name string, offset Offset, bufferSource func() []byte) (*Reader, error) {
r := &Reader{
filebased: filebased{
dir: wal.dir,
fileFlags: os.O_RDONLY,
h: newHash(),
log: golog.LoggerFor("wal." + name),
},
wal: wal,
bufferSource: bufferSource,
}
if offset != nil {
offsetString := sequenceToFilename(offset.FileSequence())
if offsetString[0] != '0' {
wal.log.Debugf("Converting legacy offset")
offset = NewOffset(offset.FileSequence()/1000, offset.Position())
}
files, err := ioutil.ReadDir(wal.dir)
if err != nil {
return nil, fmt.Errorf("Unable to list existing log files: %v", err)
}
cutoff := sequenceToFilename(offset.FileSequence())
for i, fileInfo := range files {
isMostRecent := i == len(files)-1
if fileInfo.Name() >= cutoff {
// Found existing or more recent WAL file
r.fileSequence = filenameToSequence(fileInfo.Name())
if r.fileSequence == offset.FileSequence() {
// Exact match, start at right position
r.position = offset.Position()
if r.position == fileInfo.Size() && !isMostRecent {
// At end of file and more recent is available, move to next
continue
}
} else {
// Newer WAL file, start at beginning
r.position = 0
}
openErr := r.open()
if openErr != nil {
return nil, fmt.Errorf("Unable to open existing log file at %v: %v", fileInfo.Name(), openErr)
}
break
}
}
}
if r.file == nil {
// Didn't find WAL file, advance
err := r.advance()
if err != nil {
return nil, fmt.Errorf("Unable to advance initially: %v", err)
}
wal.log.Debugf("Replaying log starting at %v", r.file.Name())
}
return r, nil
}
// Read reads the next chunk from the WAL, blocking until one is available.
func (r *Reader) Read() ([]byte, error) {
for {
length, err := r.readHeader()
if err != nil {
return nil, err
}
checksum, err := r.readHeader()
if err != nil {
return nil, err
}
if length > maxEntrySize {
fmt.Printf("Discarding wal entry of size %v exceeding %v, probably corrupted\n", humanize.Bytes(uint64(length)), humanize.Bytes(uint64(maxEntrySize)))
_, discardErr := io.CopyN(ioutil.Discard, r.reader, int64(length))
if discardErr == io.EOF {
discardErr = nil
}
return nil, discardErr
}
data, err := r.readData(length)
if data != nil || err != nil {
if data != nil {
r.h.Reset()
r.h.Write(data)
if checksum != int(r.h.Sum32()) {
r.log.Errorf("Checksum mismatch, skipping entry")
continue
}
}
return data, err
}
}
}
func (r *Reader) readHeader() (int, error) {
headBuf := make([]byte, 4)
top:
for {
length := 0
read := 0
for
|
{
if atomic.LoadInt32(&r.stopped) == 1 {
return 0, io.EOF
}
if atomic.LoadInt32(&r.closed) == 1 {
return 0, io.ErrUnexpectedEOF
}
n, err := r.reader.Read(headBuf[read:])
read += n
r.position += int64(n)
if err != nil && err.Error() == "EOF" && read < 4 {
if r.wal.hasMovedBeyond(r.fileSequence) {
if read > 0 {
r.log.Errorf("Out of data to read after reading %d, and WAL has moved beyond %d. Assuming WAL at %v corrupted. Advancing and continuing.", r.position, r.fileSequence, r.filename())
}
advanceErr := r.advance()
if advanceErr != nil {
return 0, advanceErr
}
continue top
|
conditional_block
|
|
wal.go
|
.writer.Flush()
syncErr := wal.file.Sync()
wal.mx.Unlock()
closeErr := wal.file.Close()
if flushErr != nil {
err = flushErr
}
if syncErr != nil {
err = syncErr
}
		if closeErr != nil {
			err = closeErr
		}
})
return
}
func (wal *WAL) advance() error {
wal.fileSequence = newFileSequence()
wal.position = 0
err := wal.openFile()
if err == nil {
wal.writer = bufio.NewWriterSize(wal.file, defaultFileBuffer)
}
return err
}
func (wal *WAL) sync(syncInterval time.Duration) {
for {
time.Sleep(syncInterval)
select {
case <-wal.closed:
return
default:
wal.mx.Lock()
wal.doSync()
wal.mx.Unlock()
}
}
}
func (wal *WAL) doSync() {
err := wal.writer.Flush()
if err != nil {
wal.log.Errorf("Unable to flush wal: %v", err)
return
}
err = wal.file.Sync()
if err != nil {
wal.log.Errorf("Unable to sync wal: %v", err)
}
}
func (wal *WAL) hasMovedBeyond(fileSequence int64) bool {
wal.mx.RLock()
hasMovedBeyond := wal.fileSequence > fileSequence
wal.mx.RUnlock()
return hasMovedBeyond
}
// Reader allows reading from a WAL. It is NOT safe to read from a single Reader
// from multiple goroutines.
type Reader struct {
filebased
wal *WAL
reader io.Reader
bufferSource func() []byte
stopped int32
closed int32
}
// NewReader constructs a new Reader for reading from this WAL starting at the
// given offset. The returned Reader is NOT safe for use from multiple
// goroutines. Name is just a label for the reader used during logging.
func (wal *WAL) NewReader(name string, offset Offset, bufferSource func() []byte) (*Reader, error) {
r := &Reader{
filebased: filebased{
dir: wal.dir,
fileFlags: os.O_RDONLY,
h: newHash(),
log: golog.LoggerFor("wal." + name),
},
wal: wal,
bufferSource: bufferSource,
}
if offset != nil {
offsetString := sequenceToFilename(offset.FileSequence())
if offsetString[0] != '0' {
wal.log.Debugf("Converting legacy offset")
offset = NewOffset(offset.FileSequence()/1000, offset.Position())
}
files, err := ioutil.ReadDir(wal.dir)
if err != nil {
return nil, fmt.Errorf("Unable to list existing log files: %v", err)
}
cutoff := sequenceToFilename(offset.FileSequence())
for i, fileInfo := range files {
isMostRecent := i == len(files)-1
if fileInfo.Name() >= cutoff {
// Found existing or more recent WAL file
r.fileSequence = filenameToSequence(fileInfo.Name())
if r.fileSequence == offset.FileSequence() {
// Exact match, start at right position
r.position = offset.Position()
if r.position == fileInfo.Size() && !isMostRecent {
// At end of file and more recent is available, move to next
continue
}
} else {
// Newer WAL file, start at beginning
r.position = 0
}
openErr := r.open()
if openErr != nil {
return nil, fmt.Errorf("Unable to open existing log file at %v: %v", fileInfo.Name(), openErr)
}
break
}
}
}
if r.file == nil {
// Didn't find WAL file, advance
err := r.advance()
if err != nil {
return nil, fmt.Errorf("Unable to advance initially: %v", err)
}
wal.log.Debugf("Replaying log starting at %v", r.file.Name())
}
return r, nil
}
// Read reads the next chunk from the WAL, blocking until one is available.
func (r *Reader) Read() ([]byte, error) {
for {
length, err := r.readHeader()
if err != nil {
return nil, err
}
checksum, err := r.readHeader()
if err != nil {
return nil, err
}
if length > maxEntrySize {
fmt.Printf("Discarding wal entry of size %v exceeding %v, probably corrupted\n", humanize.Bytes(uint64(length)), humanize.Bytes(uint64(maxEntrySize)))
_, discardErr := io.CopyN(ioutil.Discard, r.reader, int64(length))
if discardErr == io.EOF {
discardErr = nil
}
return nil, discardErr
}
data, err := r.readData(length)
if data != nil || err != nil {
if data != nil {
r.h.Reset()
r.h.Write(data)
if checksum != int(r.h.Sum32()) {
r.log.Errorf("Checksum mismatch, skipping entry")
continue
}
}
return data, err
}
}
}
func (r *Reader) readHeader() (int, error) {
headBuf := make([]byte, 4)
top:
for {
length := 0
read := 0
for {
if atomic.LoadInt32(&r.stopped) == 1 {
return 0, io.EOF
}
if atomic.LoadInt32(&r.closed) == 1 {
return 0, io.ErrUnexpectedEOF
}
n, err := r.reader.Read(headBuf[read:])
read += n
r.position += int64(n)
if err != nil && err.Error() == "EOF" && read < 4 {
if r.wal.hasMovedBeyond(r.fileSequence) {
if read > 0 {
r.log.Errorf("Out of data to read after reading %d, and WAL has moved beyond %d. Assuming WAL at %v corrupted. Advancing and continuing.", r.position, r.fileSequence, r.filename())
}
advanceErr := r.advance()
if advanceErr != nil {
return 0, advanceErr
}
continue top
}
// No newer log files, continue trying to read from this one
time.Sleep(50 * time.Millisecond)
continue
}
if err != nil {
r.log.Errorf("Unexpected error reading header from WAL file %v: %v", r.filename(), err)
break
}
if read == 4 {
length = int(encoding.Uint32(headBuf))
break
}
}
if length > sentinel {
return length, nil
}
err := r.advance()
if err != nil {
return 0, err
}
}
}
func (r *Reader) readData(length int) ([]byte, error) {
buf := r.bufferSource()
// Grow buffer if necessary
if cap(buf) < length {
buf = make([]byte, length)
}
// Set buffer length
buf = buf[:length]
// Read into buffer
read := 0
for {
if atomic.LoadInt32(&r.stopped) == 1 {
return nil, io.EOF
}
if atomic.LoadInt32(&r.closed) == 1 {
return nil, io.ErrUnexpectedEOF
}
n, err := r.reader.Read(buf[read:])
read += n
r.position += int64(n)
if err != nil && err.Error() == "EOF" && read < length {
if r.wal.hasMovedBeyond(r.fileSequence) {
r.log.Errorf("Out of data to read after reading %d, and WAL has moved beyond %d. Assuming WAL at %v corrupted. Advancing and continuing.", r.position, r.fileSequence, r.filename())
advanceErr := r.advance()
if advanceErr != nil {
return nil, advanceErr
}
return nil, nil
}
// No newer log files, continue trying to read from this one
time.Sleep(50 * time.Millisecond)
continue
}
if err != nil {
r.log.Errorf("Unexpected error reading data from WAL file %v: %v", r.filename(), err)
return nil, nil
}
if read == length {
return buf, nil
}
}
}
// Offset returns the furthest Offset read by this Reader. It is NOT safe to
// call this concurrently with Read().
func (r *Reader) Offset() Offset {
return NewOffset(r.fileSequence, r.position)
}
// Stop stops this reader from advancing
func (r *Reader) Stop() {
atomic.StoreInt32(&r.stopped, 1)
}
// Close closes the Reader.
func (r *Reader) Close() error {
atomic.StoreInt32(&r.closed, 1)
return r.file.Close()
}
func (r *Reader) open() error {
err := r.openFile()
if err != nil {
return err
}
if r.compressed {
r.reader = snappy.NewReader(r.file)
} else {
|
random_line_split
|
||
wal.go
|
offset = NewOffset(fileSequence, position)
}
}()
var r io.Reader
r, err := os.OpenFile(filepath.Join(wal.dir, filename), os.O_RDONLY, 0600)
if err != nil {
return false, fmt.Errorf("Unable to open WAL file %v: %v", filename, err)
}
if strings.HasSuffix(filename, compressedSuffix) {
r = snappy.NewReader(r)
} else {
r = bufio.NewReaderSize(r, defaultFileBuffer)
}
h := newHash()
for {
headBuf := make([]byte, 8)
_, err := io.ReadFull(r, headBuf)
if err != nil {
			// upon encountering a read error, stop, as we've found the end of the latest segment
return false, nil
}
length := int64(encoding.Uint32(headBuf))
checksum := uint32(encoding.Uint32(headBuf[4:]))
b := make([]byte, length)
_, err = io.ReadFull(r, b)
if err != nil {
// upon encountering a read error, break, as we've found the end of the latest segment
break
}
h.Reset()
h.Write(b)
if h.Sum32() != checksum {
// checksum failure means we've hit a corrupted entry, so we're at the end
break
}
data = b
position += 8 + length
}
lastSeq = fileSequence
return true, nil
})
// No files found with a valid entry, return nil data and offset
return data, offset, err
}
// Write atomically writes one or more buffers to the WAL.
func (wal *WAL) Write(bufs ...[]byte) error {
if wal.backlog != nil {
wal.backlog <- bufs
return nil
} else {
return wal.doWrite(bufs...)
}
}
func (wal *WAL)
|
() {
defer close(wal.backlogFinished)
for bufs := range wal.backlog {
if err := wal.doWrite(bufs...); err != nil {
wal.log.Errorf("Error writing to WAL!: %v", err)
}
}
}
func (wal *WAL) doWrite(bufs ...[]byte) error {
wal.mx.Lock()
defer wal.mx.Unlock()
length := 0
for _, b := range bufs {
blen := len(b)
length += blen
if length > maxEntrySize {
fmt.Printf("Ignoring wal entry of size %v exceeding %v", humanize.Bytes(uint64(blen)), humanize.Bytes(uint64(maxEntrySize)))
return nil
}
}
if length == 0 {
return nil
}
if wal.position >= maxSegmentSize {
// Write sentinel length to mark end of file
if _, advanceErr := wal.writer.Write(sentinelBytes); advanceErr != nil {
return advanceErr
}
if advanceErr := wal.writer.Flush(); advanceErr != nil {
return advanceErr
}
if advanceErr := wal.advance(); advanceErr != nil {
return fmt.Errorf("Unable to advance to next file: %v", advanceErr)
}
}
wal.h.Reset()
for _, buf := range bufs {
wal.h.Write(buf)
}
headerBuf := make([]byte, 4)
// Write length
encoding.PutUint32(headerBuf, uint32(length))
n, err := wal.writer.Write(headerBuf)
wal.position += int64(n)
if err != nil {
return err
}
// Write checksum
encoding.PutUint32(headerBuf, wal.h.Sum32())
n, err = wal.writer.Write(headerBuf)
wal.position += int64(n)
if err != nil {
return err
}
for _, b := range bufs {
n, err = wal.writer.Write(b)
if err != nil {
return err
}
wal.position += int64(n)
}
if wal.syncImmediate {
wal.doSync()
}
return nil
}
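Read together with the Latest scan earlier in this file, doWrite implies a simple on-disk frame layout: a 4-byte length, a 4-byte checksum of the payload, then the payload bytes. The sketch below restates that layout using the newHash and encoding helpers already defined in this package; it is illustrative only, not part of the original file.
// Sketch only: frame a single payload the way doWrite lays it out on disk.
func frameEntry(payload []byte) []byte {
	h := newHash()
	h.Write(payload)
	out := make([]byte, 8+len(payload))
	encoding.PutUint32(out[0:4], uint32(len(payload)))
	encoding.PutUint32(out[4:8], h.Sum32())
	copy(out[8:], payload)
	return out
}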
// TruncateBefore removes all data prior to the given offset from disk.
func (wal *WAL) TruncateBefore(o Offset) error {
cutoff := sequenceToFilename(o.FileSequence())
_, latestOffset, err := wal.Latest()
if err != nil {
return fmt.Errorf("Unable to determine latest offset: %v", err)
}
latestSequence := latestOffset.FileSequence()
return wal.forEachSegment(func(file os.FileInfo, first bool, last bool) (bool, error) {
if last || file.Name() >= cutoff {
// Files are sorted by name, if we've gotten past the cutoff or
// encountered the last (active) file, don't bother continuing.
return false, nil
}
if filenameToSequence(file.Name()) == latestSequence {
// Don't delete the file containing the latest valid entry
return true, nil
}
rmErr := os.Remove(filepath.Join(wal.dir, file.Name()))
if rmErr != nil {
return false, rmErr
}
wal.log.Debugf("Removed WAL file %v", filepath.Join(wal.dir, file.Name()))
return true, nil
})
}
// TruncateBeforeTime truncates WAL data prior to the given timestamp.
func (wal *WAL) TruncateBeforeTime(ts time.Time) error {
return wal.TruncateBefore(NewOffset(tsToFileSequence(ts), 0))
}
// TruncateToSize caps the size of the WAL to the given number of bytes
func (wal *WAL) TruncateToSize(limit int64) error {
seen := int64(0)
return wal.forEachSegmentInReverse(func(file os.FileInfo, first bool, last bool) (bool, error) {
next := file.Size()
seen += next
if seen > limit {
fullname := filepath.Join(wal.dir, file.Name())
rmErr := os.Remove(fullname)
if rmErr != nil {
return false, rmErr
}
wal.log.Debugf("Removed WAL file %v", fullname)
}
return true, nil
})
}
// CompressBefore compresses all data prior to the given offset on disk.
func (wal *WAL) CompressBefore(o Offset) error {
cutoff := sequenceToFilename(o.FileSequence())
return wal.forEachSegment(func(file os.FileInfo, first bool, last bool) (bool, error) {
if last || file.Name() >= cutoff {
// Files are sorted by name, if we've gotten past the cutoff or
// encountered the last (active) file, don't bother continuing.
return false, nil
}
return wal.compress(file)
})
}
// CompressBeforeTime compresses all data prior to the given offset on disk.
func (wal *WAL) CompressBeforeTime(ts time.Time) error {
return wal.CompressBefore(NewOffset(tsToFileSequence(ts), 0))
}
// CompressBeforeSize compresses all segments prior to the given size
func (wal *WAL) CompressBeforeSize(limit int64) error {
seen := int64(0)
return wal.forEachSegmentInReverse(func(file os.FileInfo, first bool, last bool) (bool, error) {
if last {
// Don't compress the last (active) file
return true, nil
}
next := file.Size()
seen += next
if seen > limit {
return wal.compress(file)
}
return true, nil
})
}
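The truncation and compression helpers above compose naturally into a retention policy. The sketch below is a hypothetical example (the thresholds are arbitrary) using only methods defined in this file; it is not part of the package itself.
// Sketch only: an example retention pass combining the helpers above.
func applyRetention(wal *WAL) error {
	if err := wal.CompressBeforeTime(time.Now().Add(-24 * time.Hour)); err != nil {
		return err
	}
	if err := wal.TruncateBeforeTime(time.Now().Add(-7 * 24 * time.Hour)); err != nil {
		return err
	}
	return wal.TruncateToSize(10 * 1024 * 1024 * 1024) // keep at most ~10 GB on disk
}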
func (wal *WAL) compress(file os.FileInfo) (bool, error) {
infile := filepath.Join(wal.dir, file.Name())
outfile := infile + compressedSuffix
if strings.HasSuffix(file.Name(), compressedSuffix) {
// Already compressed
return true, nil
}
in, err := os.OpenFile(infile, os.O_RDONLY, 0600)
if err != nil {
return false, fmt.Errorf("Unable to open input file %v for compression: %v", infile, err)
}
defer in.Close()
out, err := ioutil.TempFile("", "")
if err != nil {
return false, fmt.Errorf("Unable to open temp file to compress %v: %v", infile, err)
}
defer out.Close()
defer os.Remove(out.Name())
compressedOut := snappy.NewWriter(out)
_, err = io.Copy(compressedOut, bufio.NewReaderSize(in, defaultFileBuffer))
if err != nil {
return false, fmt.Errorf("Unable to compress %v: %v", infile, err)
}
err = compressedOut.Close()
if err != nil {
return false, fmt.Errorf("Unable to finalize compression of %v: %v", infile, err)
}
err = out.Close()
if err != nil {
return false, fmt.Errorf("Unable to close compressed output %v: %v", outfile, err)
}
err = os.Rename(out.Name(), outfile)
if err != nil {
return false, fmt.Errorf("Unable to move compressed output %v to final destination %v: %v", out.Name(), outfile, err)
}
err = os.Remove(infile)
if err != nil {
return false, fmt.Errorf("Unable to remove uncompressed file %v: %v", infile, err)
}
w
|
writeAsync
|
identifier_name
|
dirutils.js
|
constructed as needed.
For example if a folder exists here:
/path/to/folder
... but the following sub-folders don't exist:
/path/to/folder/sub/one/two/three
... Then the "sub/one/two/three" tree will be constructed inside "/path/to/folder")
* @method makedir
* @private
* @param {string} dest="path/to/make" The destination folder to create
*/
function makedir(dest) {
dest = path.resolve(dest);
if (!fs.existsSync(dest)) {
makedir(path.dirname(dest));
fs.mkdirSync(dest); // adds to wait stack
}
};
/*
{
by : "ext",
accept : ["js", "html"],
reject : ["js", "html"]
}
*/
/**
Collects files from a folder based on the specified extension (or
extensions). Can be used to search recursively through all sub-folders, and can
search multiple extensions.
NOTE: Extension filtering is case-insensitive, so files with both upper and lower-case extensions will be captured.
Provided as a shortcut for [readdir](#readdir) with your own extension-checking filter.
* @method readExt
*
* @param {string} from - The path to search
* @param {string | array} [exts] - The extension to look for (e.g. "jpg"). To search for multiple extensions, use an array e.g. ["jpg", "png", "gif"].
* @param {boolean} [recursive] - Find all matching files in all sub-folders.
* @param {function} [filter] - A function to filter items on. The signature for this function's arguments is:
- isFolder (boolean): Whether the item is a folder or not
- file (string): The URI to the file
- stats (object) : Info for the file such as time. See Node's [statSync](https://nodejs.org/api/fs.html#fs_class_fs_stats)
- pathInfo (object) : Since we're already parsing the path via [path.parse](path.parse), we're sending the resulting object to you as well.
*
* @return {array} - The resulting array contains only files that match the
specified extension(s).
*/
function readExt(from, exts, recursive, filter){
for(var i=0; i<exts.length; i++){
exts[i] = exts[i].toLowerCase();
}
var extFilter;
if( Array.isArray(exts) ){
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = exts.indexOf(item.ext.substr(1).toLowerCase()) > -1;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
} else {
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = item.ext.substr(1).toLowerCase() == exts;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
}
var obj = readdir(from, extFilter, recursive);
return obj.files;
}
/**
* Read a folder and returns an object containing all of the files and
folder in arrays.
* @method readdir
* @private
* @param {string} from - The path to the folder to read.
* @param {function} filter - A custom filter function that should return a boolean for inclusion. The function will be passed arguments with the following signature:
*
* filter( isFolder [boolean], file [URI string], stats [instance of Node's statSync] );
*
* // See Node's statSync : https://nodejs.org/api/fs.html#fs_class_fs_stats
*
* @param {boolean} recursive - Should we retrieve sub-folders too?
* @param {object} store - Used internally to store recursive findings.
Note that you may also provide this argument and readdir will populate your
existing files/folders list. But it is recommended to leave this argument alone.
*
* @return {object} - An object containing a list of "files" and "dirs"
(as properties of the returned list), where each is an array.
*
@example
var contents = readdir("/path/to/folder", null, true);
// yields contents {
// files : [
// "/path/to/folder/1.foo",
// "/path/to/folder/2.bar",
// "/path/to/folder/3.png",
// "/path/to/folder/sub1/1.foo",
// "/path/to/folder/sub2/2.bar",
// "/path/to/folder/sub3/3.png"
// ],
// dirs : [
// "/path/to/folder/sub1",
// "/path/to/folder/sub2",
// "/path/to/folder/sub3"
//
// ]
// }
*/
function readdir(from, filter, recursive, store){
if( ! store ){
store = { dirs: [], files: [] };
}
var hasFilterFunction = typeof filter == 'function';
var files = fs.readdirSync(from);
var len = files.length;
for(var i=0; i<len; i++){
var file = path.join(from, files[i]);
var stats = false; // set this value otherwise a failing try will pickup the previous stats value (hoisted var)
// the file may be unusual, and Node may not be able to run stats on it.
try {
// stats = fs.lstatSync(file);
stats = fs.statSync(file); // de-reference symlinks (follows symbolic links)
} catch(e) {
// ignore
}
if(stats){
if ( stats.isDirectory() ) {
if(hasFilterFunction){
if( filter( true, file, stats ) ){
store.dirs.push(file);
}
} else {
store.dirs.push(file);
}
if(recursive){
readdir(file, filter, true, store);
}
} else if ( stats.isFile() ) {
if(hasFilterFunction){
if( filter( false, file, stats ) ){
store.files.push(file);
}
} else {
store.files.push(file);
}
}
}
}
return store;
}
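For readers more familiar with Go, the same collect-files-and-dirs idea can be expressed with filepath.WalkDir. This is a loose, illustrative analogue of readdir above (always recursive, unlike the optional recursion here) and is not part of dirutils.js.
// Go sketch of the readdir idea: walk a tree, filter, and collect dirs/files.
package dirsketch

import (
	"io/fs"
	"path/filepath"
)

type listing struct {
	dirs  []string
	files []string
}

func readdir(from string, filter func(isDir bool, path string) bool) (listing, error) {
	var out listing
	err := filepath.WalkDir(from, func(path string, d fs.DirEntry, walkErr error) error {
		if walkErr != nil || path == from {
			return nil // like the JS version, skip entries that cannot be inspected
		}
		if filter != nil && !filter(d.IsDir(), path) {
			return nil // rejected entries are not collected, but directories are still descended
		}
		if d.IsDir() {
			out.dirs = append(out.dirs, path)
		} else {
			out.files = append(out.files, path)
		}
		return nil
	})
	return out, err
}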
/**
* Copies the entire folder hierarchy from one location to another. If the other location doesn't exist, it will be constructed.
*
* @method copydir
* @private
* @param {string} from - The source folder
* @param {string} to - The destination folder (gets created if it doesn't exist)
*/
function copydir(from, to) {
var list = readdir(from, null, true);
if( ! exists(to) ){
makedir(to);
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
makedir( path.join(to, path.relative(from, dirs[i])) );
}
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
fs.writeFileSync(
path.join(to, path.relative(from, file)),
fs.readFileSync(file, 'binary'),
'binary'
);
}
};
/**
* Recursively empties a folder of all its contents (and all the sub-folders' contents), but leaves the source folder.
*
* @method emptydir
* @private
* @param {string} who - The source folder
* @param {boolean} dryRun - Prevents actual deletion, but still allows the return list to display what "will" be deleted.
*
* @return {array} - An array containing a list of paths to files and folders that were deleted (or will be deleted when dryRun is true)
*/
function emptydir(who, dryRun)
|
removed.push(dir);
if( ! dryRun ){
fs.rmdirSync(dir);
}
}
}
return removed;
}
;
/**
* Recursively removes a folder and all of its sub-folders as well.
*
* @method removedir
* @private
* @param {string} who - The path to the folder
* @param {boolean} dryRun - Prevents actual deletion, but
|
{
var removed = [];
if( exists(who) ) {
var list = readdir(who, null, true);
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
removed.push(file);
if( ! dryRun ){
fs.unlinkSync(file);
}
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
var dir = dirs[i]
|
identifier_body
|
dirutils.js
|
constructed as needed.
For example if a folder exists here:
/path/to/folder
... but the following sub-folders don't exist:
/path/to/folder/sub/one/two/three
... Then the "sub/one/two/three" tree will be constructed inside "/path/to/folder")
* @method makedir
* @private
* @param {string} dest="path/to/make" The destination folder to create
*/
function makedir(dest) {
dest = path.resolve(dest);
if (!fs.existsSync(dest)) {
makedir(path.dirname(dest));
fs.mkdirSync(dest); // adds to wait stack
}
};
/*
{
by : "ext",
accept : ["js", "html"],
reject : ["js", "html"]
}
*/
/**
Collects files from a folder based on the specified extension (or
extensions). Can be used to search recursively through all sub-folders, and can
search multiple extensions.
NOTE: Extension filtering is case-insensitive, so files with both upper and lower-case extensions will be captured.
Provided as a shortcut for [readdir](#readdir) with your own extension-checking filter.
* @method readExt
*
* @param {string} from - The path to search
* @param {string | array} [exts] - The extension to look for (e.g. "jpg"). To search for multiple extensions, use an array e.g. ["jpg", "png", "gif"].
* @param {boolean} [recursive] - Find all matching files in all sub-folders.
* @param {function} [filter] - A function to filter items on. The signature for this function's arguments is:
- isFolder (boolean): Whether the item is a folder or not
- file (string): The URI to the file
- stats (object) : Info for the file such as time. See Node's [statSync](https://nodejs.org/api/fs.html#fs_class_fs_stats)
- pathInfo (object) : Since we're already parsing the path via [path.parse](path.parse), we're sending the resulting object to you as well.
*
* @return {array} - The resulting array contains only files that match the
specified extension(s).
*/
function readExt(from, exts, recursive, filter){
for(var i=0; i<exts.length; i++){
exts[i] = exts[i].toLowerCase();
}
var extFilter;
if( Array.isArray(exts) ){
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = exts.indexOf(item.ext.substr(1).toLowerCase()) > -1;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
} else {
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = item.ext.substr(1).toLowerCase() == exts;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
}
var obj = readdir(from, extFilter, recursive);
return obj.files;
}
/**
* Read a folder and returns an object containing all of the files and
folder in arrays.
* @method readdir
* @private
* @param {string} from - The path to the folder to read.
* @param {function} filter - A custom filter function that should return a boolean for inclusion. The function will be passed arguments with the following signature:
*
* filter( isFolder [boolean], file [URI string], stats [instance of Node's statSync] );
*
* // See Node's statSync : https://nodejs.org/api/fs.html#fs_class_fs_stats
*
* @param {boolean} recursive - Should we retrieve sub-folders too?
* @param {object} store - Used internally to store recursive findings.
Note that you may also provide this argument and readdir will populate your
existing files/folders list. But it is recommended to leave this argument alone.
*
* @return {object} - An object containing a list of "files" and "dirs"
(as properties of the returned list), where each is an array.
*
@example
var contents = readdir("/path/to/folder", null, true);
// yields contents {
// files : [
// "/path/to/folder/1.foo",
// "/path/to/folder/2.bar",
// "/path/to/folder/3.png",
// "/path/to/folder/sub1/1.foo",
// "/path/to/folder/sub2/2.bar",
// "/path/to/folder/sub3/3.png"
// ],
// dirs : [
// "/path/to/folder/sub1",
// "/path/to/folder/sub2",
// "/path/to/folder/sub3"
//
// ]
// }
*/
function readdir(from, filter, recursive, store){
if( ! store ){
store = { dirs: [], files: [] };
}
var hasFilterFunction = typeof filter == 'function';
var files = fs.readdirSync(from);
var len = files.length;
for(var i=0; i<len; i++){
var file = path.join(from, files[i]);
var stats = false; // set this value otherwise a failing try will pickup the previous stats value (hoisted var)
// the file may be unusual, and Node may not be able to run stats on it.
try {
// stats = fs.lstatSync(file);
stats = fs.statSync(file); // de-reference symlinks (follows symbolic links)
} catch(e) {
// ignore
}
if(stats){
if ( stats.isDirectory() ) {
if(hasFilterFunction){
if( filter( true, file, stats ) ){
store.dirs.push(file);
}
} else {
store.dirs.push(file);
}
if(recursive){
readdir(file, filter, true, store);
}
} else if ( stats.isFile() ) {
if(hasFilterFunction){
if( filter( false, file, stats ) ){
store.files.push(file);
}
} else {
store.files.push(file);
}
}
}
}
return store;
}
/**
* Copies the entire folder hierarchy from one location to another. If the other location doesn't exist, it will be constructed.
*
* @method copydir
* @private
* @param {string} from - The source folder
* @param {string} to - The destination folder (gets created if it doesn't exist)
*/
function copydir(from, to) {
var list = readdir(from, null, true);
if( ! exists(to) ){
makedir(to);
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
makedir( path.join(to, path.relative(from, dirs[i])) );
}
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
fs.writeFileSync(
path.join(to, path.relative(from, file)),
fs.readFileSync(file, 'binary'),
'binary'
);
}
};
/**
* Recursively empties a folder of all its contents (and all the sub-folders' contents), but leaves the source folder.
*
* @method emptydir
* @private
* @param {string} who - The source folder
* @param {boolean} dryRun - Prevents actual deletion, but still allows the return list to display what "will" be deleted.
*
* @return {array} - An array containing a list of paths to files and folders that were deleted (or will be deleted when dryRun is true)
*/
function
|
(who, dryRun) {
var removed = [];
if( exists(who) ) {
var list = readdir(who, null, true);
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
removed.push(file);
if( ! dryRun ){
fs.unlinkSync(file);
}
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
var dir = dirs[i]
removed.push(dir);
if( ! dryRun ){
fs.rmdirSync(dir);
}
}
}
return removed;
};
/**
* Recursively removes a folder and all of its sub-folders as well.
*
* @method removedir
* @private
* @param {string} who - The path to the folder
* @param {boolean} dryRun - Prevents actual deletion, but still
|
emptydir
|
identifier_name
|
dirutils.js
|
constructed as needed.
For example if a folder exists here:
/path/to/folder
... but the following sub-folders don't exist:
/path/to/folder/sub/one/two/three
... Then the "sub/one/two/three" tree will be constructed inside "/path/to/folder")
* @method makedir
* @private
* @param {string} dest="path/to/make" The destination folder to create
*/
function makedir(dest) {
dest = path.resolve(dest);
if (!fs.existsSync(dest)) {
makedir(path.dirname(dest));
fs.mkdirSync(dest); // adds to wait stack
}
};
/*
{
by : "ext",
accept : ["js", "html"],
reject : ["js", "html"]
}
*/
/**
Collects files from a folder based on the specified extension (or
extensions). Can be used to search recursively through all sub-folders, and can
search multiple extensions.
NOTE: Extension filtering is case-insensitive, so files with both upper and lower-case extensions will be captured.
Provided as a shortcut for [readdir](#readdir) with your own extension-checking filter.
* @method readExt
*
* @param {string} from - The path to search
* @param {string | array} [exts] - The extension to look for (e.g. "jpg"). To search for multiple extensions, use an array e.g. ["jpg", "png", "gif"].
* @param {boolean} [recursive] - Find all matching files in all sub-folders.
* @param {function} [filter] - A function to filter items on. The signature for this function's arguments is:
- isFolder (boolean): Whether the item is a folder or not
- file (string): The URI to the file
- stats (object) : Info for the file such as time. See Node's [statSync](https://nodejs.org/api/fs.html#fs_class_fs_stats)
- pathInfo (object) : Since we're already parsing the path via [path.parse](path.parse), we're sending the resulting object to you as well.
*
* @return {array} - The resulting array contains only files that match the
specified extension(s).
*/
function readExt(from, exts, recursive, filter){
for(var i=0; i<exts.length; i++){
exts[i] = exts[i].toLowerCase();
}
var extFilter;
if( Array.isArray(exts) ){
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = exts.indexOf(item.ext.substr(1).toLowerCase()) > -1;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
} else {
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = item.ext.substr(1).toLowerCase() == exts;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
}
var obj = readdir(from, extFilter, recursive);
return obj.files;
}
/**
* Read a folder and returns an object containing all of the files and
folder in arrays.
* @method readdir
* @private
* @param {string} from - The path to the folder to read.
* @param {function} filter - A custom filter function that should return a boolean for inclusion. The function will be passed arguments with the following signature:
*
* filter( isFolder [boolean], file [URI string], stats [instance of Node's statSync] );
*
* // See Node's statSync : https://nodejs.org/api/fs.html#fs_class_fs_stats
*
* @param {boolean} recursive - Should we retrieve sub-folders too?
* @param {object} store - Used internally to store recursive findings.
Note that you may also provide this argument and readdir will populate your
existing files/folders list. But it is recommended to leave this argument alone.
*
* @return {object} - An object containing a list of "files" and "dirs"
(as properties of the returned list), where each is an array.
*
@example
var contents = readdir("/path/to/folder", null, true);
// yields contents {
// files : [
// "/path/to/folder/1.foo",
// "/path/to/folder/2.bar",
// "/path/to/folder/3.png",
// "/path/to/folder/sub1/1.foo",
// "/path/to/folder/sub2/2.bar",
// "/path/to/folder/sub3/3.png"
// ],
// dirs : [
// "/path/to/folder/sub1",
// "/path/to/folder/sub2",
// "/path/to/folder/sub3"
//
// ]
// }
*/
function readdir(from, filter, recursive, store){
if( ! store ){
store = { dirs: [], files: [] };
}
var hasFilterFunction = typeof filter == 'function';
var files = fs.readdirSync(from);
var len = files.length;
for(var i=0; i<len; i++){
var file = path.join(from, files[i]);
var stats = false; // set this value otherwise a failing try will pickup the previous stats value (hoisted var)
// the file may be unusual, and Node may not be able to run stats on it.
try {
// stats = fs.lstatSync(file);
stats = fs.statSync(file); // de-reference symlinks (follows symbolic links)
} catch(e) {
// ignore
}
if(stats){
if ( stats.isDirectory() ) {
if(hasFilterFunction){
if( filter( true, file, stats ) ){
store.dirs.push(file);
}
} else {
store.dirs.push(file);
}
if(recursive){
readdir(file, filter, true, store);
}
} else if ( stats.isFile() ) {
if(hasFilterFunction){
if( filter( false, file, stats ) )
|
} else {
store.files.push(file);
}
}
}
}
return store;
}
/**
* Copies the entire folder hierarchy from one location to another. If the other location doesn't exist, it will be constructed.
*
* @method copydir
* @private
* @param {string} from - The source folder
* @param {string} to - The destination folder (gets created if it doesn't exist)
*/
function copydir(from, to) {
var list = readdir(from, null, true);
if( ! exists(to) ){
makedir(to);
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
makedir( path.join(to, path.relative(from, dirs[i])) );
}
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
fs.writeFileSync(
path.join(to, path.relative(from, file)),
fs.readFileSync(file, 'binary'),
'binary'
);
}
};
/**
* Recursively empties a folder of all its contents (and all the sub-folders' contents), but leaves the source folder.
*
* @method emptydir
* @private
* @param {string} who - The source folder
* @param {boolean} dryRun - Prevents actual deletion, but still allows the return list to display what "will" be deleted.
*
* @return {array} - An array containing a list of paths to files and folders that were deleted (or will be deleted when dryRun is true)
*/
function emptydir(who, dryRun) {
var removed = [];
if( exists(who) ) {
var list = readdir(who, null, true);
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
removed.push(file);
if( ! dryRun ){
fs.unlinkSync(file);
}
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
var dir = dirs[i]
removed.push(dir);
if( ! dryRun ){
fs.rmdirSync(dir);
}
}
}
return removed;
};
/**
* Recursively removes a folder and all of its sub-folders as well.
*
* @method removedir
* @private
* @param {string} who - The path to the folder
* @param {boolean} dryRun - Prevents actual deletion, but
|
{
store.files.push(file);
}
|
conditional_block
|
dirutils.js
|
constructed as needed.
For example if a folder exists here:
/path/to/folder
... but the following sub-folders don't exist:
/path/to/folder/sub/one/two/three
... Then the "sub/one/two/three" tree will be constructed inside "/path/to/folder")
* @method makedir
* @private
* @param {string} dest="path/to/make" The destination folder to create
*/
function makedir(dest) {
dest = path.resolve(dest);
if (!fs.existsSync(dest)) {
makedir(path.dirname(dest));
fs.mkdirSync(dest); // adds to wait stack
}
};
/*
{
by : "ext",
accept : ["js", "html"],
reject : ["js", "html"]
}
*/
/**
Collects files from a folder based on the specified extension (or
extensions). Can be used to search recursively through all sub-folders, and can
search multiple extensions.
NOTE: Extension filtering is case-insensitive, so files with both upper and lower-case extensions will be captured.
Provided as a shortcut for [readdir](#readdir) with your own extension-checking filter.
* @method readExt
*
* @param {string} from - The path to search
* @param {string | array} [exts] - The extension to look for (e.g. "jpg"). To search for multiple extensions, use an array e.g. ["jpg", "png", "gif"].
* @param {boolean} [recursive] - Find all matching files in all sub-folders.
* @param {function} [filter] - A function to filter items on. The signature for this function's arguments is:
- isFolder (boolean): Whether the item is a folder or not
- file (string): The URI to the file
- stats (object) : Info for the file such as time. See Node's [statSync](https://nodejs.org/api/fs.html#fs_class_fs_stats)
- pathInfo (object) : Since we're already parsing the path via [path.parse](path.parse), we're sending the resulting object to you as well.
*
* @return {array} - The resulting array contains only files that match the
specified extension(s).
*/
function readExt(from, exts, recursive, filter){
for(var i=0; i<exts.length; i++){
exts[i] = exts[i].toLowerCase();
}
var extFilter;
if( Array.isArray(exts) ){
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = exts.indexOf(item.ext.substr(1).toLowerCase()) > -1;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
} else {
extFilter = function(isFolder, file, stats){
if( isFolder ){
return false;
} else {
var item = path.parse( file );
var ok = item.ext.substr(1).toLowerCase() == exts;
if(filter && ok){
ok = filter(isFolder, file, stats, item);
}
return ok;
}
}
}
var obj = readdir(from, extFilter, recursive);
return obj.files;
}
/**
* Read a folder and returns an object containing all of the files and
folder in arrays.
* @method readdir
* @private
* @param {string} from - The path to the folder to read.
* @param {function} filter - A custom filter function that should return a boolean for inclusion. The function will be passed arguments with the following signature:
*
* filter( isFolder [boolean], file [URI string], stats [instance of Node's statSync] );
*
* // See Node's statSync : https://nodejs.org/api/fs.html#fs_class_fs_stats
*
* @param {boolean} recursive - Should we retrieve sub-folders too?
* @param {object} store - Used internally to store recursive findings.
Note that you may also provide this argument and readdir will populate your
existing files/folders list. But it is recommended to leave this argument alone.
*
* @return {object} - An object containing a list of "files" and "dirs"
(as properties of the returned list), where each is an array.
*
@example
var contents = readdir("/path/to/folder", null, true);
// yields contents {
// files : [
// "/path/to/folder/1.foo",
// "/path/to/folder/2.bar",
// "/path/to/folder/3.png",
// "/path/to/folder/sub1/1.foo",
// "/path/to/folder/sub2/2.bar",
// "/path/to/folder/sub3/3.png"
// ],
// dirs : [
// "/path/to/folder/sub1",
// "/path/to/folder/sub2",
// "/path/to/folder/sub3"
//
// ]
// }
*/
|
if( ! store ){
store = { dirs: [], files: [] };
}
var hasFilterFunction = typeof filter == 'function';
var files = fs.readdirSync(from);
var len = files.length;
for(var i=0; i<len; i++){
var file = path.join(from, files[i]);
var stats = false; // set this value otherwise a failing try will pickup the previous stats value (hoisted var)
// the file may be unusual, and Node may not be able to run stats on it.
try {
// stats = fs.lstatSync(file);
stats = fs.statSync(file); // de-reference symlinks (follows symbolic links)
} catch(e) {
// ignore
}
if(stats){
if ( stats.isDirectory() ) {
if(hasFilterFunction){
if( filter( true, file, stats ) ){
store.dirs.push(file);
}
} else {
store.dirs.push(file);
}
if(recursive){
readdir(file, filter, true, store);
}
} else if ( stats.isFile() ) {
if(hasFilterFunction){
if( filter( false, file, stats ) ){
store.files.push(file);
}
} else {
store.files.push(file);
}
}
}
}
return store;
}
/**
* Copies the entire folder hierarchy from one location to another. If the other location doesn't exist, it will be constructed.
*
* @method copydir
* @private
* @param {string} from - The source folder
* @param {string} to - The destination folder (gets created if it doesn't exist)
*/
function copydir(from, to) {
var list = readdir(from, null, true);
if( ! exists(to) ){
makedir(to);
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
makedir( path.join(to, path.relative(from, dirs[i])) );
}
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
fs.writeFileSync(
path.join(to, path.relative(from, file)),
fs.readFileSync(file, 'binary'),
'binary'
);
}
};
/**
* Recursively empties a folder of all its contents (and all the sub-folders' contents), but leaves the source folder.
*
* @method emptydir
* @private
* @param {string} who - The source folder
* @param {boolean} dryRun - Prevents actual deletion, but still allows the return list to display what "will" be deleted.
*
* @return {array} - An array containing a list of paths to files and folders that were deleted (or will be deleted when dryRun is true)
*/
function emptydir(who, dryRun) {
var removed = [];
if( exists(who) ) {
var list = readdir(who, null, true);
var files = list.files;
for(var i=files.length; i--;){
var file = files[i];
removed.push(file);
if( ! dryRun ){
fs.unlinkSync(file);
}
}
var dirs = list.dirs.sort(); // should be a little faster if we sort
for(var i=dirs.length; i--;){
var dir = dirs[i]
removed.push(dir);
if( ! dryRun ){
fs.rmdirSync(dir);
}
}
}
return removed;
};
/**
* Recursively removes a folder and all of its sub-folders as well.
*
* @method removedir
* @private
* @param {string} who - The path to the folder
* @param {boolean} dryRun - Prevents actual deletion, but still allows
|
function readdir(from, filter, recursive, store){
|
random_line_split
|
model_sql_test.go
|
(src interface{}) error {
if value, ok := src.(string); ok {
*p = password{
hashed: value,
}
}
return nil
}
// used in gopg
func (p *password) ScanValue(rd gopg.TypesReader, n int) error {
value, err := gopg.TypesScanString(rd, n)
if err == nil {
*p = password{
hashed: value,
}
}
return err
}
func (p password) Value() (driver.Value, error) {
return p.hashed, nil
}
func (p password) MarshalJSON() ([]byte, error) {
return json.Marshal(p.clear)
}
func (p *password) UnmarshalJSON(t []byte) error {
var value string
if err := json.Unmarshal(t, &value); err != nil {
return err
}
*p = password{}
if value != "" {
p.Update(value)
}
return nil
}
var connStr string
func init() {
connStr = os.Getenv("DBCONNSTR")
if connStr == "" {
connStr = "postgres://localhost:5432/furktests?sslmode=disable"
}
}
func TestCRUDInPQ(t *testing.T) {
conn, err := pq.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func TestCRUDInPGX(t *testing.T) {
conn, err := pgx.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func TestCRUDInGOPG(t *testing.T) {
conn, err := gopg.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func testCRUD(_t *testing.T, conn db.DB) {
t := test{_t}
o := db.NewModel(order{})
o.SetConnection(conn)
o.SetLogger(logger.StandardLogger)
// drop table
err := o.NewSQLWithValues(o.DropSchema()).Execute()
if err != nil {
t.Fatal(err)
}
// create table
err = o.NewSQLWithValues(o.Schema()).Execute()
if err != nil {
t.Fatal(err)
}
randomBytes := make([]byte, 10)
if _, err := rand.Read(randomBytes); err != nil {
t.Fatal(err)
}
tradeNo := hex.EncodeToString(randomBytes)
totalAmount, _ := decimal.NewFromString("12.34")
createInput := strings.NewReader(`{
"Status": "changed",
"TradeNumber": "` + tradeNo + `",
"TotalAmount": "` + totalAmount.String() + `",
"foobar_user_id": 1,
"NotAllowed": "foo",
"Password": "123123",
"FieldInJsonb": "yes",
"otherjsonb": "no",
"testjsonb": 123,
"BadType": "string",
"Sources": [{
"Name": "yes",
"baddata": "foobar"
}],
"Sources2": {
"cash": 100
},
"Sources3": {
"Word": "finish"
}
}`)
var createData map[string]interface{}
if err := json.NewDecoder(createInput).Decode(&createData); err != nil {
t.Fatal(err)
}
model := db.NewModel(order{}, conn, logger.StandardLogger)
var id int
err = model.Insert(
model.Permit(
"Status", "TradeNumber", "UserId", "Password", "FieldInJsonb", "OtherJsonb",
"jsonbTest", "TotalAmount", "BadType", "Sources", "Sources2", "Sources3",
).Filter(createData),
model.Changes(db.RawChanges{
"name": "foobar",
"title": "hello",
"Status": "new",
}),
model.CreatedAt(),
model.UpdatedAt(),
)("RETURNING id").QueryRow(&id)
if err != nil {
t.Fatal(err)
}
t.Int("first order id", id, 1)
var badType, sources, sources2, sources3 string
model.Select(
"COALESCE(meta->>'bad_type', 'empty'), meta->>'sources', meta2::text, meta3::text",
).MustQueryRow(&badType, &sources, &sources2, &sources3)
// field with wrong type is skipped, so empty is returned
t.String("first order bad type", badType, "empty")
// unwanted content "baddata" is filtered
t.String("first order sources", sources, `[{"Name": "yes"}]`)
t.String("first order sources 2", sources2, `{"sources2": {"cash": 100}}`) // map
t.String("first order sources 3", sources3, `{"sources3": {"Word": "finish"}}`) // struct
exists := model.MustExists("WHERE id = $1", id)
t.Bool("first order exists", exists)
exists2 := model.MustExists("WHERE id = $1", id+1)
t.Bool("first order exists #2", exists2 == false)
err = model.Insert(
model.Changes(db.RawChanges{
"Status": "new2",
}),
)("RETURNING id").QueryRow(&id)
if err != nil {
t.Fatal(err)
}
t.Int("second order id", id, 2)
var statuses []string
model.Select("status").MustQuery(&statuses)
t.Int("statuses length", len(statuses), 2)
t.String("status 0", statuses[0], "new")
t.String("status 1", statuses[1], "new2")
var ids []int
model.Select("id").MustQuery(&ids)
t.Int("ids length", len(ids), 2)
t.Int("id 0", ids[0], 1)
t.Int("id 1", ids[1], 2)
id2status := map[int]string{}
model.Select("id, status").MustQuery(&id2status)
t.Int("map length", len(id2status), 2)
t.String("map 0", id2status[1], "new")
t.String("map 1", id2status[2], "new2")
var status2id map[string]int
model.Select("status, id").MustQuery(&status2id)
t.Int("map length", len(status2id), 2)
t.Int("map 0", status2id["new"], 1)
t.Int("map 1", status2id["new2"], 2)
var createdAts []time.Time
model.Select("created_at").MustQuery(&createdAts)
t.Int("created_at length", len(createdAts), 2)
d1 := time.Since(createdAts[0])
d2 := time.Since(createdAts[1])
t.Bool("created_at 0", d1 > 0 && d1 < 200*time.Millisecond)
t.Bool("created_at 1", d2 > 0 && d2 < 200*time.Millisecond)
var customOrders []struct {
status string
id int
}
db.NewModelTable("orders", conn, logger.StandardLogger).
Select("status, id", "ORDER BY id ASC").MustQuery(&customOrders)
t.String("custom order struct", fmt.Sprintf("%+v", customOrders), "[{status:new id:1} {status:new2 id:2}]")
var firstOrder order
err = model.Find("ORDER BY created_at ASC LIMIT 1").Query(&firstOrder) // "LIMIT 1" only necessary for gopg
if err != nil {
t.Fatal(err)
}
t.Int("order id", firstOrder.Id, 1)
t.String("order status", firstOrder.Status, "new")
t.String("order trade number", firstOrder.TradeNumber, tradeNo)
t.Decimal("order total amount", firstOrder.TotalAmount, totalAmount)
t.Int("order user", firstOrder.UserId, 1)
t.String("order name", firstOrder.name, "foobar")
t.String("order title", firstOrder.title, "hello")
ca := time.Since(firstOrder.CreatedAt)
ua := time.Since(firstOrder.UpdatedAt)
t.Bool("order created at", ca > 0 && ca < 200*time.Millisecond)
t.Bool("order updated at", ua > 0 && ua < 200*time.Millisecond)
t.String("order ignored", firstOrder.Ignored, "")
t.String("order ignored #2", firstOrder.ignored, "")
t.String("order password", firstOrder.Password.String(), "4297f44b13955235245b2497399d7a93")
t.Bool("order password 2", firstOrder.Password.Equal("123123"))
t.String("order FieldInJsonb", firstOrder.FieldInJsonb, "yes")
t.String("order OtherJsonb", firstOrder.OtherJsonb, "no")
t.Int("order jsonbTest", firstOrder.jsonbTest, 123)
var c echoContext
changes, err := model.Permit().Bind(c, &firstOrder)
if err != nil {
t.Fatal(err)
}
t.Int("bind changes
|
Scan
|
identifier_name
|
|
model_sql_test.go
|
used in gopg
func (p *password) ScanValue(rd gopg.TypesReader, n int) error {
value, err := gopg.TypesScanString(rd, n)
if err == nil {
*p = password{
hashed: value,
}
}
return err
}
func (p password) Value() (driver.Value, error) {
return p.hashed, nil
}
func (p password) MarshalJSON() ([]byte, error) {
return json.Marshal(p.clear)
}
func (p *password) UnmarshalJSON(t []byte) error {
var value string
if err := json.Unmarshal(t, &value); err != nil {
return err
}
*p = password{}
if value != "" {
p.Update(value)
}
return nil
}
var connStr string
func init() {
connStr = os.Getenv("DBCONNSTR")
if connStr == "" {
connStr = "postgres://localhost:5432/furktests?sslmode=disable"
}
}
func TestCRUDInPQ(t *testing.T) {
conn, err := pq.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func TestCRUDInPGX(t *testing.T) {
conn, err := pgx.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func TestCRUDInGOPG(t *testing.T) {
conn, err := gopg.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func testCRUD(_t *testing.T, conn db.DB)
|
if _, err := rand.Read(randomBytes); err != nil {
t.Fatal(err)
}
tradeNo := hex.EncodeToString(randomBytes)
totalAmount, _ := decimal.NewFromString("12.34")
createInput := strings.NewReader(`{
"Status": "changed",
"TradeNumber": "` + tradeNo + `",
"TotalAmount": "` + totalAmount.String() + `",
"foobar_user_id": 1,
"NotAllowed": "foo",
"Password": "123123",
"FieldInJsonb": "yes",
"otherjsonb": "no",
"testjsonb": 123,
"BadType": "string",
"Sources": [{
"Name": "yes",
"baddata": "foobar"
}],
"Sources2": {
"cash": 100
},
"Sources3": {
"Word": "finish"
}
}`)
var createData map[string]interface{}
if err := json.NewDecoder(createInput).Decode(&createData); err != nil {
t.Fatal(err)
}
model := db.NewModel(order{}, conn, logger.StandardLogger)
var id int
err = model.Insert(
model.Permit(
"Status", "TradeNumber", "UserId", "Password", "FieldInJsonb", "OtherJsonb",
"jsonbTest", "TotalAmount", "BadType", "Sources", "Sources2", "Sources3",
).Filter(createData),
model.Changes(db.RawChanges{
"name": "foobar",
"title": "hello",
"Status": "new",
}),
model.CreatedAt(),
model.UpdatedAt(),
)("RETURNING id").QueryRow(&id)
if err != nil {
t.Fatal(err)
}
t.Int("first order id", id, 1)
var badType, sources, sources2, sources3 string
model.Select(
"COALESCE(meta->>'bad_type', 'empty'), meta->>'sources', meta2::text, meta3::text",
).MustQueryRow(&badType, &sources, &sources2, &sources3)
// field with wrong type is skipped, so empty is returned
t.String("first order bad type", badType, "empty")
// unwanted content "baddata" is filtered
t.String("first order sources", sources, `[{"Name": "yes"}]`)
t.String("first order sources 2", sources2, `{"sources2": {"cash": 100}}`) // map
t.String("first order sources 3", sources3, `{"sources3": {"Word": "finish"}}`) // struct
exists := model.MustExists("WHERE id = $1", id)
t.Bool("first order exists", exists)
exists2 := model.MustExists("WHERE id = $1", id+1)
t.Bool("first order exists #2", exists2 == false)
err = model.Insert(
model.Changes(db.RawChanges{
"Status": "new2",
}),
)("RETURNING id").QueryRow(&id)
if err != nil {
t.Fatal(err)
}
t.Int("second order id", id, 2)
var statuses []string
model.Select("status").MustQuery(&statuses)
t.Int("statuses length", len(statuses), 2)
t.String("status 0", statuses[0], "new")
t.String("status 1", statuses[1], "new2")
var ids []int
model.Select("id").MustQuery(&ids)
t.Int("ids length", len(ids), 2)
t.Int("id 0", ids[0], 1)
t.Int("id 1", ids[1], 2)
id2status := map[int]string{}
model.Select("id, status").MustQuery(&id2status)
t.Int("map length", len(id2status), 2)
t.String("map 0", id2status[1], "new")
t.String("map 1", id2status[2], "new2")
var status2id map[string]int
model.Select("status, id").MustQuery(&status2id)
t.Int("map length", len(status2id), 2)
t.Int("map 0", status2id["new"], 1)
t.Int("map 1", status2id["new2"], 2)
var createdAts []time.Time
model.Select("created_at").MustQuery(&createdAts)
t.Int("created_at length", len(createdAts), 2)
d1 := time.Since(createdAts[0])
d2 := time.Since(createdAts[1])
t.Bool("created_at 0", d1 > 0 && d1 < 200*time.Millisecond)
t.Bool("created_at 1", d2 > 0 && d2 < 200*time.Millisecond)
var customOrders []struct {
status string
id int
}
db.NewModelTable("orders", conn, logger.StandardLogger).
Select("status, id", "ORDER BY id ASC").MustQuery(&customOrders)
t.String("custom order struct", fmt.Sprintf("%+v", customOrders), "[{status:new id:1} {status:new2 id:2}]")
var firstOrder order
err = model.Find("ORDER BY created_at ASC LIMIT 1").Query(&firstOrder) // "LIMIT 1" only necessary for gopg
if err != nil {
t.Fatal(err)
}
t.Int("order id", firstOrder.Id, 1)
t.String("order status", firstOrder.Status, "new")
t.String("order trade number", firstOrder.TradeNumber, tradeNo)
t.Decimal("order total amount", firstOrder.TotalAmount, totalAmount)
t.Int("order user", firstOrder.UserId, 1)
t.String("order name", firstOrder.name, "foobar")
t.String("order title", firstOrder.title, "hello")
ca := time.Since(firstOrder.CreatedAt)
ua := time.Since(firstOrder.UpdatedAt)
t.Bool("order created at", ca > 0 && ca < 200*time.Millisecond)
t.Bool("order updated at", ua > 0 && ua < 200*time.Millisecond)
t.String("order ignored", firstOrder.Ignored, "")
t.String("order ignored #2", firstOrder.ignored, "")
t.String("order password", firstOrder.Password.String(), "4297f44b13955235245b2497399d7a93")
t.Bool("order password 2", firstOrder.Password.Equal("123123"))
t.String("order FieldInJsonb", firstOrder.FieldInJsonb, "yes")
t.String("order OtherJsonb", firstOrder.OtherJsonb, "no")
t.Int("order jsonbTest", firstOrder.jsonbTest, 123)
var c echoContext
changes, err := model.Permit().Bind(c, &firstOrder)
if err != nil {
t.Fatal(err)
}
t.Int("bind changes size", len(changes), 0)
t.Int("bind order id", firstOrder.Id, 1)
t.String("bind order status", firstOrder.Status, "new")
t.String
|
{
t := test{_t}
o := db.NewModel(order{})
o.SetConnection(conn)
o.SetLogger(logger.StandardLogger)
// drop table
err := o.NewSQLWithValues(o.DropSchema()).Execute()
if err != nil {
t.Fatal(err)
}
// create table
err = o.NewSQLWithValues(o.Schema()).Execute()
if err != nil {
t.Fatal(err)
}
randomBytes := make([]byte, 10)
|
identifier_body
|
model_sql_test.go
|
used in gopg
func (p *password) ScanValue(rd gopg.TypesReader, n int) error {
value, err := gopg.TypesScanString(rd, n)
if err == nil {
*p = password{
hashed: value,
}
}
return err
}
func (p password) Value() (driver.Value, error) {
return p.hashed, nil
}
func (p password) MarshalJSON() ([]byte, error) {
return json.Marshal(p.clear)
}
func (p *password) UnmarshalJSON(t []byte) error {
var value string
if err := json.Unmarshal(t, &value); err != nil {
return err
}
*p = password{}
if value != "" {
p.Update(value)
}
return nil
}
var connStr string
func init() {
connStr = os.Getenv("DBCONNSTR")
if connStr == "" {
connStr = "postgres://localhost:5432/furktests?sslmode=disable"
}
}
func TestCRUDInPQ(t *testing.T) {
conn, err := pq.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func TestCRUDInPGX(t *testing.T) {
conn, err := pgx.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func TestCRUDInGOPG(t *testing.T) {
conn, err := gopg.Open(connStr)
if err != nil {
t.Fatal(err)
}
testCRUD(t, conn)
}
func testCRUD(_t *testing.T, conn db.DB) {
t := test{_t}
o := db.NewModel(order{})
o.SetConnection(conn)
o.SetLogger(logger.StandardLogger)
// drop table
err := o.NewSQLWithValues(o.DropSchema()).Execute()
if err != nil
|
// create table
err = o.NewSQLWithValues(o.Schema()).Execute()
if err != nil {
t.Fatal(err)
}
randomBytes := make([]byte, 10)
if _, err := rand.Read(randomBytes); err != nil {
t.Fatal(err)
}
tradeNo := hex.EncodeToString(randomBytes)
totalAmount, _ := decimal.NewFromString("12.34")
createInput := strings.NewReader(`{
"Status": "changed",
"TradeNumber": "` + tradeNo + `",
"TotalAmount": "` + totalAmount.String() + `",
"foobar_user_id": 1,
"NotAllowed": "foo",
"Password": "123123",
"FieldInJsonb": "yes",
"otherjsonb": "no",
"testjsonb": 123,
"BadType": "string",
"Sources": [{
"Name": "yes",
"baddata": "foobar"
}],
"Sources2": {
"cash": 100
},
"Sources3": {
"Word": "finish"
}
}`)
var createData map[string]interface{}
if err := json.NewDecoder(createInput).Decode(&createData); err != nil {
t.Fatal(err)
}
model := db.NewModel(order{}, conn, logger.StandardLogger)
var id int
err = model.Insert(
model.Permit(
"Status", "TradeNumber", "UserId", "Password", "FieldInJsonb", "OtherJsonb",
"jsonbTest", "TotalAmount", "BadType", "Sources", "Sources2", "Sources3",
).Filter(createData),
model.Changes(db.RawChanges{
"name": "foobar",
"title": "hello",
"Status": "new",
}),
model.CreatedAt(),
model.UpdatedAt(),
)("RETURNING id").QueryRow(&id)
if err != nil {
t.Fatal(err)
}
t.Int("first order id", id, 1)
var badType, sources, sources2, sources3 string
model.Select(
"COALESCE(meta->>'bad_type', 'empty'), meta->>'sources', meta2::text, meta3::text",
).MustQueryRow(&badType, &sources, &sources2, &sources3)
// field with wrong type is skipped, so empty is returned
t.String("first order bad type", badType, "empty")
// unwanted content "baddata" is filtered
t.String("first order sources", sources, `[{"Name": "yes"}]`)
t.String("first order sources 2", sources2, `{"sources2": {"cash": 100}}`) // map
t.String("first order sources 3", sources3, `{"sources3": {"Word": "finish"}}`) // struct
exists := model.MustExists("WHERE id = $1", id)
t.Bool("first order exists", exists)
exists2 := model.MustExists("WHERE id = $1", id+1)
t.Bool("first order exists #2", exists2 == false)
err = model.Insert(
model.Changes(db.RawChanges{
"Status": "new2",
}),
)("RETURNING id").QueryRow(&id)
if err != nil {
t.Fatal(err)
}
t.Int("second order id", id, 2)
var statuses []string
model.Select("status").MustQuery(&statuses)
t.Int("statuses length", len(statuses), 2)
t.String("status 0", statuses[0], "new")
t.String("status 1", statuses[1], "new2")
var ids []int
model.Select("id").MustQuery(&ids)
t.Int("ids length", len(ids), 2)
t.Int("id 0", ids[0], 1)
t.Int("id 1", ids[1], 2)
id2status := map[int]string{}
model.Select("id, status").MustQuery(&id2status)
t.Int("map length", len(id2status), 2)
t.String("map 0", id2status[1], "new")
t.String("map 1", id2status[2], "new2")
var status2id map[string]int
model.Select("status, id").MustQuery(&status2id)
t.Int("map length", len(status2id), 2)
t.Int("map 0", status2id["new"], 1)
t.Int("map 1", status2id["new2"], 2)
var createdAts []time.Time
model.Select("created_at").MustQuery(&createdAts)
t.Int("created_at length", len(createdAts), 2)
d1 := time.Since(createdAts[0])
d2 := time.Since(createdAts[1])
t.Bool("created_at 0", d1 > 0 && d1 < 200*time.Millisecond)
t.Bool("created_at 1", d2 > 0 && d2 < 200*time.Millisecond)
var customOrders []struct {
status string
id int
}
db.NewModelTable("orders", conn, logger.StandardLogger).
Select("status, id", "ORDER BY id ASC").MustQuery(&customOrders)
t.String("custom order struct", fmt.Sprintf("%+v", customOrders), "[{status:new id:1} {status:new2 id:2}]")
var firstOrder order
err = model.Find("ORDER BY created_at ASC LIMIT 1").Query(&firstOrder) // "LIMIT 1" only necessary for gopg
if err != nil {
t.Fatal(err)
}
t.Int("order id", firstOrder.Id, 1)
t.String("order status", firstOrder.Status, "new")
t.String("order trade number", firstOrder.TradeNumber, tradeNo)
t.Decimal("order total amount", firstOrder.TotalAmount, totalAmount)
t.Int("order user", firstOrder.UserId, 1)
t.String("order name", firstOrder.name, "foobar")
t.String("order title", firstOrder.title, "hello")
ca := time.Since(firstOrder.CreatedAt)
ua := time.Since(firstOrder.UpdatedAt)
t.Bool("order created at", ca > 0 && ca < 200*time.Millisecond)
t.Bool("order updated at", ua > 0 && ua < 200*time.Millisecond)
t.String("order ignored", firstOrder.Ignored, "")
t.String("order ignored #2", firstOrder.ignored, "")
t.String("order password", firstOrder.Password.String(), "4297f44b13955235245b2497399d7a93")
t.Bool("order password 2", firstOrder.Password.Equal("123123"))
t.String("order FieldInJsonb", firstOrder.FieldInJsonb, "yes")
t.String("order OtherJsonb", firstOrder.OtherJsonb, "no")
t.Int("order jsonbTest", firstOrder.jsonbTest, 123)
var c echoContext
changes, err := model.Permit().Bind(c, &firstOrder)
if err != nil {
t.Fatal(err)
}
t.Int("bind changes size", len(changes), 0)
t.Int("bind order id", firstOrder.Id, 1)
t.String("bind order status", firstOrder.Status, "new")
t
|
{
t.Fatal(err)
}
|
conditional_block
|
model_sql_test.go
|
t.Fatal(err)
}
t.Int("second order id", id, 2)
var statuses []string
model.Select("status").MustQuery(&statuses)
t.Int("statuses length", len(statuses), 2)
t.String("status 0", statuses[0], "new")
t.String("status 1", statuses[1], "new2")
var ids []int
model.Select("id").MustQuery(&ids)
t.Int("ids length", len(ids), 2)
t.Int("id 0", ids[0], 1)
t.Int("id 1", ids[1], 2)
id2status := map[int]string{}
model.Select("id, status").MustQuery(&id2status)
t.Int("map length", len(id2status), 2)
t.String("map 0", id2status[1], "new")
t.String("map 1", id2status[2], "new2")
var status2id map[string]int
model.Select("status, id").MustQuery(&status2id)
t.Int("map length", len(status2id), 2)
t.Int("map 0", status2id["new"], 1)
t.Int("map 1", status2id["new2"], 2)
var createdAts []time.Time
model.Select("created_at").MustQuery(&createdAts)
t.Int("created_at length", len(createdAts), 2)
d1 := time.Since(createdAts[0])
d2 := time.Since(createdAts[1])
t.Bool("created_at 0", d1 > 0 && d1 < 200*time.Millisecond)
t.Bool("created_at 1", d2 > 0 && d2 < 200*time.Millisecond)
var customOrders []struct {
status string
id int
}
db.NewModelTable("orders", conn, logger.StandardLogger).
Select("status, id", "ORDER BY id ASC").MustQuery(&customOrders)
t.String("custom order struct", fmt.Sprintf("%+v", customOrders), "[{status:new id:1} {status:new2 id:2}]")
var firstOrder order
err = model.Find("ORDER BY created_at ASC LIMIT 1").Query(&firstOrder) // "LIMIT 1" only necessary for gopg
if err != nil {
t.Fatal(err)
}
t.Int("order id", firstOrder.Id, 1)
t.String("order status", firstOrder.Status, "new")
t.String("order trade number", firstOrder.TradeNumber, tradeNo)
t.Decimal("order total amount", firstOrder.TotalAmount, totalAmount)
t.Int("order user", firstOrder.UserId, 1)
t.String("order name", firstOrder.name, "foobar")
t.String("order title", firstOrder.title, "hello")
ca := time.Since(firstOrder.CreatedAt)
ua := time.Since(firstOrder.UpdatedAt)
t.Bool("order created at", ca > 0 && ca < 200*time.Millisecond)
t.Bool("order updated at", ua > 0 && ua < 200*time.Millisecond)
t.String("order ignored", firstOrder.Ignored, "")
t.String("order ignored #2", firstOrder.ignored, "")
t.String("order password", firstOrder.Password.String(), "4297f44b13955235245b2497399d7a93")
t.Bool("order password 2", firstOrder.Password.Equal("123123"))
t.String("order FieldInJsonb", firstOrder.FieldInJsonb, "yes")
t.String("order OtherJsonb", firstOrder.OtherJsonb, "no")
t.Int("order jsonbTest", firstOrder.jsonbTest, 123)
var c echoContext
changes, err := model.Permit().Bind(c, &firstOrder)
if err != nil {
t.Fatal(err)
}
t.Int("bind changes size", len(changes), 0)
t.Int("bind order id", firstOrder.Id, 1)
t.String("bind order status", firstOrder.Status, "new")
t.String("bind order trade number", firstOrder.TradeNumber, tradeNo)
changes, err = model.Permit("Id", "TradeNumber").Bind(c, &firstOrder)
if err != nil {
t.Fatal(err)
}
t.Int("bind changes size", len(changes), 2)
t.Int("bind order id", firstOrder.Id, 2)
t.String("bind order status", firstOrder.Status, "new")
t.String("bind order trade number", firstOrder.TradeNumber, "")
var orders []order
err = model.Find("ORDER BY created_at DESC").Query(&orders)
if err != nil {
t.Fatal(err)
}
t.Int("orders size", len(orders), 2)
t.Int("first order id", orders[0].Id, 2)
t.Int("first order jsonbTest", orders[0].jsonbTest, 0)
t.Int("second order id", orders[1].Id, 1)
t.Int("second order jsonbTest", orders[1].jsonbTest, 123)
time.Sleep(200 * time.Millisecond)
updateInput := strings.NewReader(`{
"Status": "modified",
"NotAllowed": "foo",
"FieldInJsonb": "red",
"otherjsonb": "blue"
}`)
var updateData map[string]interface{}
err = json.NewDecoder(updateInput).Decode(&updateData)
if err != nil {
t.Fatal(err)
}
var ao order
achanges, err := model.Assign(
&ao,
model.Permit("Status", "FieldInJsonb", "OtherJsonb").Filter(updateData),
model.Permit("Status").Filter(db.RawChanges{
"x": "1",
"Status": "furk",
"FieldInJsonb": "black",
}),
model.UpdatedAt(),
)
if err != nil {
t.Fatal(err)
}
t.String("order status", ao.Status, "furk")
t.String("order FieldInJsonb", ao.FieldInJsonb, "red")
t.String("order OtherJsonb", ao.OtherJsonb, "blue")
var rowsAffected int
err = model.Update(achanges...)().ExecuteInTransaction(&db.TxOptions{
IsolationLevel: db.LevelSerializable,
Before: func(ctx context.Context, tx db.Tx) (err error) {
err = model.NewSQLWithValues(
"UPDATE "+model.TableName()+" SET user_id = user_id - $1",
23,
).ExecTx(tx, ctx)
return
},
After: func(ctx context.Context, tx db.Tx) (err error) {
err = model.NewSQLWithValues(
"UPDATE "+model.TableName()+" SET user_id = user_id + $1",
99,
).ExecTx(tx, ctx)
return
},
}, &rowsAffected)
if err != nil {
t.Fatal(err)
}
t.Int("rows affected", rowsAffected, 2)
var secondOrder order
err = model.Find("WHERE id = $1", 2).Query(&secondOrder)
if err != nil {
t.Fatal(err)
}
t.Int("order id", secondOrder.Id, 2)
t.String("order status", secondOrder.Status, "furk")
ca = time.Since(secondOrder.CreatedAt)
ua = time.Since(secondOrder.UpdatedAt)
t.Bool("order created at", ca > 200*time.Millisecond) // because of time.Sleep
t.Bool("order updated at", ua > 0 && ua < 200*time.Millisecond)
t.String("order FieldInJsonb", secondOrder.FieldInJsonb, "red")
t.String("order OtherJsonb", secondOrder.OtherJsonb, "blue")
var u int
t.Int("order user", secondOrder.UserId, u-23+99)
count, err := model.Count()
if err != nil {
t.Fatal(err)
}
t.Int("rows count", count, 2)
var rowsDeleted int
err = model.Delete().Execute(&rowsDeleted)
if err != nil {
t.Fatal(err)
}
t.Int("rows deleted", rowsDeleted, 2)
count, err = model.Count()
if err != nil {
t.Fatal(err)
}
t.Int("rows count", count, 0)
}
func (t *test) Bool(name string, b bool) {
t.Helper()
if b {
t.Logf("%s test passed", name)
} else {
t.Errorf("%s test failed, got %t", name, b)
}
}
func (t *test) String(name, got, expected string) {
t.Helper()
if got == expected {
t.Logf("%s test passed", name)
} else {
t.Errorf("%s test failed, got %s", name, got)
}
}
func (t *test) Int(name string, got, expected int) {
t.Helper()
if got == expected {
t.Logf("%s test passed", name)
} else {
t.Errorf("%s test failed, got %d", name, got)
}
}
func (t *test) Decimal(name string, got, expected decimal.Decimal) {
t.Helper()
if got.Equal(expected) {
|
t.Logf("%s test passed", name)
} else {
		t.Errorf("%s test failed, got %s", name, got)
}
}
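// Illustrative note (added as a hedged sketch, not part of the original file):
// these helpers assume `test` embeds *testing.T, e.g. `type test struct{ *testing.T }`,
// so Helper, Logf and Errorf come from the standard testing package. A typical
// call inside a test function would then look like:
//
//	tt := test{T: t}
//	tt.Int("answer", 41+1, 42)
//	tt.Decimal("price", decimal.NewFromInt(10), decimal.NewFromInt(10))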
|
random_line_split
|
|
land_ocean_ratio.py
|
and latitude values.
Parameters
----------
    georeferenced_array : 2D array
        The georeferenced raster as a 2D array.
    extent_minimum : float or integer
        The first extent value of the georeferenced array.
    extent_maximum : float or integer
        The second extent value of the georeferenced array.
Returns
-------
interpolated_coordinates : 1D array
Interpolated latitude or longitude values for the length of the
georeferenced array.
"""
# Extracts axis 1 (columns) from the input array.
# This represents the longitude.
if extent_minimum == x0:
inn = georeferenced_array[0, :]
# Extracts axis 0 (rows) from the input array.
# This represents the latitude.
elif extent_minimum == y0:
inn = georeferenced_array[:, 0]
#
linear_interpolation = [((i-0)*(extent_maximum-extent_minimum)/(
(len(inn)-1)-0)+extent_minimum) for i, r in enumerate(inn)]
    # Calculates the difference between each pair of consecutive values
    # in the list
difference = [y - x for x, y in zip(linear_interpolation,
linear_interpolation[1:])]
    # Calculates the size of each candidate array so it can be compared
    # to the size of the input array.
array_length = [np.size(np.arange(
extent_minimum, extent_maximum, i)) for i in difference]
# Select values that only match the longitude/latitude length then return
# the first index in the list of matched values.
# This list is a list of indexes that correspond to the index in the
# variable difference.
index_of_correct_value = [i for i, v in enumerate(
array_length) if v == len(inn)][0]
#
interpolated_coordinates = np.arange(extent_minimum,
extent_maximum,
difference[index_of_correct_value])
return interpolated_coordinates
x_longitude = linear_interpolation_of_x_y(test, x0, x1)
y_latitude = linear_interpolation_of_x_y(test, y0, y1)
xx_longitude, yy_longitude = np.meshgrid(x_longitude, y_latitude[::-1])
mask = shapely.vectorized.contains(irregular_study_area,
xx_longitude,
yy_longitude)
def mask_and_binarize(polygon_mask, area_of_interest_raster, threshold):
raster_array = raster_to_array(area_of_interest_raster)
# Pixels outside of the polygon are assigned nan values.
    masked = np.where(polygon_mask, raster_array, np.nan)
binerized_array = np.where(masked >= threshold, 255, 0)
box_top, box_bottom, box_left, box_right = 616, 649, 637, 681
# Draw hollow rectangle with 2px border width on left and 1px for rest.
# -9999 is a random value I chose. Easier to detect in image.
binerized_array[box_top:box_bottom, box_left:box_left+2] = -9999
binerized_array[box_top:box_bottom, box_right-1:box_right] = -9999
binerized_array[box_top:box_top+1, box_left:box_right] = -9999
binerized_array[box_bottom-1:box_bottom, box_left:box_right] = -9999
# If pixels are not equal to -9999 keep the pixel value.
# Pixels that are equal to -9999 are assigned 'nan'.
binerized_array = np.where(binerized_array != -9999, binerized_array, np.nan)
binerized_array = np.ma.array(binerized_array, mask=np.isnan(masked))
return binerized_array
sample_mask = mask_and_binarize(mask, georeferenced_images[0], 150)
# %% Recreating the sea_land_ratio method
def sea_land_ratio_calculation(masked_array, box_perimeter_fill_value):
cleaned_array = np.nan_to_num(masked_array,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 4)
return land_percentage, ocean_percentage, land_ocean_ratio
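# Illustrative usage sketch (added, not part of the original script): with the
# `sample_mask` masked array built above, a float fill value such as np.nan
# keeps the drawn box perimeter out of the pixel percentages, whereas an
# integer fill value counts every pixel in the image.
#
#     land_pct, ocean_pct, ratio = sea_land_ratio_calculation(sample_mask, np.nan)
#     print(land_pct, ocean_pct, ratio)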
# %% testing the sea_land_ratio definition
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
# apply_box_perimeter_mask = 'yes'
box_perimeter_fill_value = np.nan
cleaned_array = np.nan_to_num(sample_mask,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
# land_percentage = round((land_pixels/image_pixels)*100, 4)
# ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 10)
print(f'{box_perimeter_fill_value}', land_ocean_ratio)
map_projection = ccrs.PlateCarree()
fig, axes = plt.subplots(nrows=2,
ncols=1,
figsize=(15, 5), subplot_kw={'projection': map_projection})
from matplotlib import colors
cmap = colors.ListedColormap(['dodgerblue', 'tan'])
import cartopy.feature as cfeature
georeferenced_images = os.listdir()
georeferenced_images.sort()
img = Image.open(georeferenced_images[1])
ax = axes[0]
ax.imshow(img,
origin='upper',
extent=map_extent(georeferenced_images[0]),
cmap=cmap)
continents = cfeature.NaturalEarthFeature(category='physical',
name='land',
scale='50m',
edgecolor='face')
ax.add_feature(continents,
facecolor='none',
edgecolor='grey',
lw=1)
ax.set_extent([90, 155, -25, 20], crs=map_projection)
ax = axes[1]
ax.imshow(cleaned_array,
origin='upper',
extent=map_extent(georeferenced_images[0]),
cmap=cmap)
ax.add_feature(continents,
facecolor='none',
edgecolor='grey',
lw=1)
ax.set_extent([90, 155, -25, 20], crs=map_projection)
# ax.set_extent([91, 150.59, -20.35, 20.61], crs=map_projection)
unmodified_image = raster_array = raster_to_array(georeferenced_images[0])
# %%
def multi_process_mask_and_binarize(polygon_mask,
area_of_interest_raster,
threshold):
with concurrent.futures.ProcessPoolExecutor() as executor:
processed_image = executor.map(mask_and_binarize,
repeat(polygon_mask),
area_of_interest_raster,
repeat(threshold))
return processed_image
processed_image = multi_process_mask_and_binarize(mask,
georeferenced_images,
150)
sea_land_ratio = [sea_land_ratio_calculation(i, 0) for i in processed_image]
# sea_land_ratio = [sea_land_ratio_calculation(i, 'yes mask') for i in processed_image]
# %%
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
def CropImage(image, left, top, right, bottom):
open_image = Image.open(image)
# Cropped image of above dimension
# (It will not change orginal image)
cropped_image = open_image.crop((left,
top,
right,
bottom))
return cropped_image
def
|
MultiProcessCrop
|
identifier_name
|
|
land_ocean_ratio.py
|
0, 0, right_x, bottom_y),
gdal.GCP(0, -60, 0, left_x, bottom_y)]
ds.SetProjection(sr.ExportToWkt())
wkt = ds.GetProjection()
ds.SetGCPs(gcps, wkt)
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
gdal.Warp(f"{tif_image}.tif", ds, dstSRS='EPSG:4326', format='gtiff')
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
os.remove('temporary.tif')
ds= None
if georeference_images == 'yes':
for i, w in zip(image_names, image_names_no_extension):
georeferenced_images(i, w)
# %%
# starts in the top left going clockwise finishing at top left (x, y).
# coordinates in decimal degrees.
irregular_study_area = Polygon([(98, 13.5),
(125, 13.5),
(145, -3),
(145, -18),
(122, -18),
(98, -3),
(98, 13.5)])
# %%
def raster_to_array(input_raster):
"""
    Convert a raster tiff image to a numpy array. The input is the path
    to the tiff image.
Parameters
----------
input_raster : string
Directory to the raster which should be in tiff format.
Returns
-------
converted_array : numpy array
A numpy array of the input raster.
"""
raster = gdal.Open(input_raster)
band = raster.GetRasterBand(1)
converted_array = band.ReadAsArray()
return converted_array
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
georeferenced_images = os.listdir()
georeferenced_images.sort()
test = raster_to_array(georeferenced_images[0])
test_extent = map_extent(georeferenced_images[0])
x0, x1 = test_extent[0], test_extent[1]
y0, y1 = test_extent[2], test_extent[3]
def linear_interpolation_of_x_y(georeferenced_array, extent_minimum,
extent_maximum):
"""
    A rather clunky method. The purpose is to create an array of
longitude and latitude values that have the same length as the input
georeferenced array. This method linearly interpolates the longitude
and latitude values.
Parameters
----------
    georeferenced_array : 2D array
        The georeferenced raster as a 2D array.
    extent_minimum : float or integer
        The first extent value of the georeferenced array.
    extent_maximum : float or integer
        The second extent value of the georeferenced array.
Returns
-------
interpolated_coordinates : 1D array
Interpolated latitude or longitude values for the length of the
georeferenced array.
"""
# Extracts axis 1 (columns) from the input array.
# This represents the longitude.
if extent_minimum == x0:
inn = georeferenced_array[0, :]
# Extracts axis 0 (rows) from the input array.
# This represents the latitude.
elif extent_minimum == y0:
inn = georeferenced_array[:, 0]
#
linear_interpolation = [((i-0)*(extent_maximum-extent_minimum)/(
(len(inn)-1)-0)+extent_minimum) for i, r in enumerate(inn)]
    # Calculates the difference between each pair of consecutive values
    # in the list
difference = [y - x for x, y in zip(linear_interpolation,
linear_interpolation[1:])]
    # Calculates the size of each candidate array so it can be compared
    # to the size of the input array.
array_length = [np.size(np.arange(
extent_minimum, extent_maximum, i)) for i in difference]
# Select values that only match the longitude/latitude length then return
# the first index in the list of matched values.
# This list is a list of indexes that correspond to the index in the
# variable difference.
index_of_correct_value = [i for i, v in enumerate(
array_length) if v == len(inn)][0]
#
interpolated_coordinates = np.arange(extent_minimum,
extent_maximum,
difference[index_of_correct_value])
return interpolated_coordinates
x_longitude = linear_interpolation_of_x_y(test, x0, x1)
y_latitude = linear_interpolation_of_x_y(test, y0, y1)
xx_longitude, yy_longitude = np.meshgrid(x_longitude, y_latitude[::-1])
mask = shapely.vectorized.contains(irregular_study_area,
xx_longitude,
yy_longitude)
def mask_and_binarize(polygon_mask, area_of_interest_raster, threshold):
raster_array = raster_to_array(area_of_interest_raster)
# Pixels outside of the polygon are assigned nan values.
    masked = np.where(polygon_mask, raster_array, np.nan)
binerized_array = np.where(masked >= threshold, 255, 0)
box_top, box_bottom, box_left, box_right = 616, 649, 637, 681
# Draw hollow rectangle with 2px border width on left and 1px for rest.
# -9999 is a random value I chose. Easier to detect in image.
binerized_array[box_top:box_bottom, box_left:box_left+2] = -9999
binerized_array[box_top:box_bottom, box_right-1:box_right] = -9999
binerized_array[box_top:box_top+1, box_left:box_right] = -9999
binerized_array[box_bottom-1:box_bottom, box_left:box_right] = -9999
# If pixels are not equal to -9999 keep the pixel value.
# Pixels that are equal to -9999 are assigned 'nan'.
binerized_array = np.where(binerized_array != -9999, binerized_array, np.nan)
binerized_array = np.ma.array(binerized_array, mask=np.isnan(masked))
return binerized_array
sample_mask = mask_and_binarize(mask, georeferenced_images[0], 150)
# %% Recreating the sea_land_ratio method
def sea_land_ratio_calculation(masked_array, box_perimeter_fill_value):
cleaned_array = np.nan_to_num(masked_array,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 4)
return land_percentage, ocean_percentage, land_ocean_ratio
# %% testing the sea_land_ratio definition
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
# apply_box_perimeter_mask = 'yes'
box_perimeter_fill_value = np.nan
cleaned_array = np.nan_to_num(sample_mask,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
|
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
# land_percentage = round((land_pixels/image_pixels)*100, 4)
# ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 10)
print(f'{box_perimeter_fill_value}', land_ocean_ratio)
map_projection = ccrs.PlateCarree()
fig, axes = plt.subplots(n
|
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
|
conditional_block
|
land_ocean_ratio.py
|
0, 0, right_x, bottom_y),
gdal.GCP(0, -60, 0, left_x, bottom_y)]
ds.SetProjection(sr.ExportToWkt())
wkt = ds.GetProjection()
ds.SetGCPs(gcps, wkt)
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
gdal.Warp(f"{tif_image}.tif", ds, dstSRS='EPSG:4326', format='gtiff')
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
os.remove('temporary.tif')
ds= None
if georeference_images == 'yes':
for i, w in zip(image_names, image_names_no_extension):
georeferenced_images(i, w)
# %%
# starts in the top left going clockwise finishing at top left (x, y).
# coordinates in decimal degrees.
irregular_study_area = Polygon([(98, 13.5),
(125, 13.5),
(145, -3),
(145, -18),
(122, -18),
(98, -3),
(98, 13.5)])
# %%
def raster_to_array(input_raster):
"""
    Convert a raster tiff image to a numpy array. The input is the path
    to the tiff image.
Parameters
----------
input_raster : string
Directory to the raster which should be in tiff format.
Returns
-------
converted_array : numpy array
A numpy array of the input raster.
"""
raster = gdal.Open(input_raster)
band = raster.GetRasterBand(1)
converted_array = band.ReadAsArray()
return converted_array
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
georeferenced_images = os.listdir()
georeferenced_images.sort()
test = raster_to_array(georeferenced_images[0])
test_extent = map_extent(georeferenced_images[0])
x0, x1 = test_extent[0], test_extent[1]
y0, y1 = test_extent[2], test_extent[3]
def linear_interpolation_of_x_y(georeferenced_array, extent_minimum,
extent_maximum):
"""
    A rather clunky method. The purpose is to create an array of
longitude and latitude values that have the same length as the input
georeferenced array. This method linearly interpolates the longitude
and latitude values.
Parameters
----------
    georeferenced_array : 2D array
        The georeferenced raster as a 2D array.
    extent_minimum : float or integer
        The first extent value of the georeferenced array.
    extent_maximum : float or integer
        The second extent value of the georeferenced array.
Returns
-------
interpolated_coordinates : 1D array
Interpolated latitude or longitude values for the length of the
georeferenced array.
"""
# Extracts axis 1 (columns) from the input array.
# This represents the longitude.
if extent_minimum == x0:
inn = georeferenced_array[0, :]
# Extracts axis 0 (rows) from the input array.
# This represents the latitude.
elif extent_minimum == y0:
inn = georeferenced_array[:, 0]
#
linear_interpolation = [((i-0)*(extent_maximum-extent_minimum)/(
(len(inn)-1)-0)+extent_minimum) for i, r in enumerate(inn)]
    # Calculates the difference between each pair of consecutive values
    # in the list
difference = [y - x for x, y in zip(linear_interpolation,
linear_interpolation[1:])]
    # Calculates the size of each candidate array so it can be compared
    # to the size of the input array.
array_length = [np.size(np.arange(
extent_minimum, extent_maximum, i)) for i in difference]
# Select values that only match the longitude/latitude length then return
# the first index in the list of matched values.
# This list is a list of indexes that correspond to the index in the
# variable difference.
index_of_correct_value = [i for i, v in enumerate(
array_length) if v == len(inn)][0]
#
interpolated_coordinates = np.arange(extent_minimum,
extent_maximum,
difference[index_of_correct_value])
return interpolated_coordinates
x_longitude = linear_interpolation_of_x_y(test, x0, x1)
y_latitude = linear_interpolation_of_x_y(test, y0, y1)
xx_longitude, yy_longitude = np.meshgrid(x_longitude, y_latitude[::-1])
mask = shapely.vectorized.contains(irregular_study_area,
xx_longitude,
yy_longitude)
def mask_and_binarize(polygon_mask, area_of_interest_raster, threshold):
raster_array = raster_to_array(area_of_interest_raster)
# Pixels outside of the polygon are assigned nan values.
    masked = np.where(polygon_mask, raster_array, np.nan)
binerized_array = np.where(masked >= threshold, 255, 0)
box_top, box_bottom, box_left, box_right = 616, 649, 637, 681
# Draw hollow rectangle with 2px border width on left and 1px for rest.
# -9999 is a random value I chose. Easier to detect in image.
binerized_array[box_top:box_bottom, box_left:box_left+2] = -9999
binerized_array[box_top:box_bottom, box_right-1:box_right] = -9999
binerized_array[box_top:box_top+1, box_left:box_right] = -9999
binerized_array[box_bottom-1:box_bottom, box_left:box_right] = -9999
# If pixels are not equal to -9999 keep the pixel value.
# Pixels that are equal to -9999 are assigned 'nan'.
binerized_array = np.where(binerized_array != -9999, binerized_array, np.nan)
binerized_array = np.ma.array(binerized_array, mask=np.isnan(masked))
return binerized_array
sample_mask = mask_and_binarize(mask, georeferenced_images[0], 150)
# %% Recreating the sea_land_ratio method
def sea_land_ratio_calculation(masked_array, box_perimeter_fill_value):
cleaned_array = np.nan_to_num(masked_array,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 4)
return land_percentage, ocean_percentage, land_ocean_ratio
# %% testing the sea_land_ratio definition
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
# apply_box_perimeter_mask = 'yes'
box_perimeter_fill_value = np.nan
cleaned_array = np.nan_to_num(sample_mask,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
|
land_ocean_ratio = round(land_percentage/ocean_percentage, 10)
print(f'{box_perimeter_fill_value}', land_ocean_ratio)
map_projection = ccrs.PlateCarree()
fig, axes = plt.subplots(n
|
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
# land_percentage = round((land_pixels/image_pixels)*100, 4)
# ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
|
random_line_split
|
land_ocean_ratio.py
|
60, 0, right_x, bottom_y),
gdal.GCP(0, -60, 0, left_x, bottom_y)]
ds.SetProjection(sr.ExportToWkt())
wkt = ds.GetProjection()
ds.SetGCPs(gcps, wkt)
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
gdal.Warp(f"{tif_image}.tif", ds, dstSRS='EPSG:4326', format='gtiff')
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/Images2')
os.remove('temporary.tif')
ds= None
if georeference_images == 'yes':
for i, w in zip(image_names, image_names_no_extension):
georeferenced_images(i, w)
# %%
# starts in the top left going clockwise finishing at top left (x, y).
# coordinates in decimal degrees.
irregular_study_area = Polygon([(98, 13.5),
(125, 13.5),
(145, -3),
(145, -18),
(122, -18),
(98, -3),
(98, 13.5)])
# %%
def raster_to_array(input_raster):
|
return converted_array
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
georeferenced_images = os.listdir()
georeferenced_images.sort()
test = raster_to_array(georeferenced_images[0])
test_extent = map_extent(georeferenced_images[0])
x0, x1 = test_extent[0], test_extent[1]
y0, y1 = test_extent[2], test_extent[3]
def linear_interpolation_of_x_y(georeferenced_array, extent_minimum,
extent_maximum):
"""
    A rather clunky method. The purpose is to create an array of
longitude and latitude values that have the same length as the input
georeferenced array. This method linearly interpolates the longitude
and latitude values.
Parameters
----------
    georeferenced_array : 2D array
        The georeferenced raster as a 2D array.
    extent_minimum : float or integer
        The first extent value of the georeferenced array.
    extent_maximum : float or integer
        The second extent value of the georeferenced array.
Returns
-------
interpolated_coordinates : 1D array
Interpolated latitude or longitude values for the length of the
georeferenced array.
"""
# Extracts axis 1 (columns) from the input array.
# This represents the longitude.
if extent_minimum == x0:
inn = georeferenced_array[0, :]
# Extracts axis 0 (rows) from the input array.
# This represents the latitude.
elif extent_minimum == y0:
inn = georeferenced_array[:, 0]
#
linear_interpolation = [((i-0)*(extent_maximum-extent_minimum)/(
(len(inn)-1)-0)+extent_minimum) for i, r in enumerate(inn)]
    # Calculates the difference between each pair of consecutive values
    # in the list
difference = [y - x for x, y in zip(linear_interpolation,
linear_interpolation[1:])]
    # Calculates the size of each candidate array so it can be compared
    # to the size of the input array.
array_length = [np.size(np.arange(
extent_minimum, extent_maximum, i)) for i in difference]
# Select values that only match the longitude/latitude length then return
# the first index in the list of matched values.
# This list is a list of indexes that correspond to the index in the
# variable difference.
index_of_correct_value = [i for i, v in enumerate(
array_length) if v == len(inn)][0]
#
interpolated_coordinates = np.arange(extent_minimum,
extent_maximum,
difference[index_of_correct_value])
return interpolated_coordinates
x_longitude = linear_interpolation_of_x_y(test, x0, x1)
y_latitude = linear_interpolation_of_x_y(test, y0, y1)
xx_longitude, yy_longitude = np.meshgrid(x_longitude, y_latitude[::-1])
mask = shapely.vectorized.contains(irregular_study_area,
xx_longitude,
yy_longitude)
def mask_and_binarize(polygon_mask, area_of_interest_raster, threshold):
raster_array = raster_to_array(area_of_interest_raster)
# Pixels outside of the polygon are assigned nan values.
    masked = np.where(polygon_mask, raster_array, np.nan)
binerized_array = np.where(masked >= threshold, 255, 0)
box_top, box_bottom, box_left, box_right = 616, 649, 637, 681
# Draw hollow rectangle with 2px border width on left and 1px for rest.
# -9999 is a random value I chose. Easier to detect in image.
binerized_array[box_top:box_bottom, box_left:box_left+2] = -9999
binerized_array[box_top:box_bottom, box_right-1:box_right] = -9999
binerized_array[box_top:box_top+1, box_left:box_right] = -9999
binerized_array[box_bottom-1:box_bottom, box_left:box_right] = -9999
# If pixels are not equal to -9999 keep the pixel value.
# Pixels that are equal to -9999 are assigned 'nan'.
binerized_array = np.where(binerized_array != -9999, binerized_array, np.nan)
binerized_array = np.ma.array(binerized_array, mask=np.isnan(masked))
return binerized_array
sample_mask = mask_and_binarize(mask, georeferenced_images[0], 150)
# %% Recreating the sea_land_ratio method
def sea_land_ratio_calculation(masked_array, box_perimeter_fill_value):
cleaned_array = np.nan_to_num(masked_array,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 4)
return land_percentage, ocean_percentage, land_ocean_ratio
# %% testing the sea_land_ratio definition
os.chdir(r'/home/huw/Dropbox/Sophie/SeaLevelChange/georeferenced')
# apply_box_perimeter_mask = 'yes'
box_perimeter_fill_value = np.nan
cleaned_array = np.nan_to_num(sample_mask,
copy=True,
nan=box_perimeter_fill_value,
posinf=None,
neginf=None)
image_pixels = cleaned_array.count()
image_nans = np.isnan(cleaned_array).sum()
non_nan_pixels = image_pixels-image_nans
land_pixels = np.count_nonzero(cleaned_array == 255)
ocean_pixels = np.count_nonzero(cleaned_array == 0)
if type(box_perimeter_fill_value) == float:
land_percentage = round((land_pixels/non_nan_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/non_nan_pixels)*100, 4)
elif type(box_perimeter_fill_value) == int:
land_percentage = round((land_pixels/image_pixels)*100, 4)
ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
# land_percentage = round((land_pixels/image_pixels)*100, 4)
# ocean_percentage = round((ocean_pixels/image_pixels)*100, 4)
land_ocean_ratio = round(land_percentage/ocean_percentage, 10)
print(f'{box_perimeter_fill_value}', land_ocean_ratio)
map_projection = ccrs.PlateCarree()
fig, axes = plt.subplots(n
|
"""
    Convert a raster tiff image to a numpy array. The input is the path
    to the tiff image.
Parameters
----------
input_raster : string
Directory to the raster which should be in tiff format.
Returns
-------
converted_array : numpy array
A numpy array of the input raster.
"""
raster = gdal.Open(input_raster)
band = raster.GetRasterBand(1)
converted_array = band.ReadAsArray()
|
identifier_body
|
mod.rs
|
));
Ok(storage)
}
/// Sets up an instance of `Storage`, with git turned on.
pub fn setup_luigi_with_git() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new_with_git(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
pub fn simple_with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F)
where F:Fn(&Project)
{
match with_projects(dir, search_terms, |p| {f(p);Ok(())}){
Ok(_) => {},
Err(e) => error!("{}",e)
}
}
/// Helper method that passes projects matching the `search_terms` to the passed closure `f`
pub fn with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F) -> Result<()>
where F:Fn(&Project)->Result<()>
{
trace!("with_projects({:?})", search_terms);
let luigi = try!(setup_luigi());
let projects = try!(luigi.search_projects_any(dir, search_terms));
if projects.is_empty() {
return Err(format!("Nothing found for {:?}", search_terms).into())
}
for project in &projects{
try!(f(project));
}
Ok(())
}
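// Illustrative usage sketch (added, not part of the original module): print the
// directory of every working project matching a search term, using the helpers
// defined above.
//
//     simple_with_projects(StorageDir::Working, &["myproject"], |p| {
//         println!("{}", p.dir().display());
//     });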
pub fn csv(year:i32) -> Result<String> {
let luigi = try!(setup_luigi());
let mut projects = try!(luigi.open_projects(StorageDir::Year(year)));
projects.sort_by(|pa,pb| pa.index().unwrap_or_else(||"zzzz".to_owned()).cmp( &pb.index().unwrap_or("zzzz".to_owned())));
projects_to_csv(&projects)
}
/// Produces a csv string from a list of `Project`s
/// TODO this still contains german terms
pub fn projects_to_csv(projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions") .expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap().into()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been cought by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been cought by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out:{:#?}",e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out:{:#?}",e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so lets do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)){
// no wait, nothing has changed, so lets save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists() {
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
}
else {
debug!("I expected there to be a {}, but there wasn't any ?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration>
|
/// Testing only,
|
{
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
}
|
identifier_body
|
mod.rs
|
fn projects_to_csv(projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions") .expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap().into()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been cought by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been cought by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out:{:#?}",e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out:{:#?}",e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so lets do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)){
// no wait, nothing has changed, so lets save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists() {
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
}
else {
debug!("I expected there to be a {}, but there wasn't any ?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration> {
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
}
/// Testing only, tries to run complete spec on all projects.
/// TODO make this not panic :D
/// TODO move this to `spec::all_the_things`
pub fn spec() -> Result<()> {
use project::spec::*;
let luigi = try!(setup_luigi());
//let projects = super::execute(||luigi.open_projects(StorageDir::All));
let projects = try!(luigi.open_projects(StorageDir::Working));
for project in projects{
info!("{}", project.dir().display());
let yaml = project.yaml();
client::validate(&yaml).map_err(|errors|for error in errors{
println!(" error: {}", error);
}).unwrap();
client::full_name(&yaml);
client::first_name(&yaml);
client::title(&yaml);
client::email(&yaml);
hours::caterers_string(&yaml);
invoice::number_long_str(&yaml);
invoice::number_str(&yaml);
offer::number(&yaml);
project.age().map(|a|format!("{} days", a)).unwrap();
project.date().map(|d|d.year().to_string()).unwrap();
project.sum_sold().map(|c|util::currency_to_string(&c)).unwrap();
project::manager(&yaml).map(|s|s.to_owned()).unwrap();
project::name(&yaml).map(|s|s.to_owned()).unwrap();
}
Ok(())
}
pub fn delete_project_confirmation(dir: StorageDir, search_terms:&[&str]) -> Result<()> {
let luigi = try!(setup_luigi());
for project in try!(luigi.search_projects_any(dir, search_terms)) {
try!(project.delete_project_dir_if(
|| util::really(&format!("you want me to delete {:?} [y/N]", project.dir())) && util::really("really? [y/N]")
))
}
Ok(())
}
pub fn archive_projects(search_terms:&[&str], manual_year:Option<i32>, force:bool) -> Result<Vec<PathBuf>>{
trace!("archive_projects matching ({:?},{:?},{:?})", search_terms, manual_year,force);
let luigi = try!(setup_luigi_with_git());
Ok(try!( luigi.archive_projects_if(search_terms, manual_year, || force) ))
|
random_line_split
|
||
mod.rs
|
));
Ok(storage)
}
/// Sets up an instance of `Storage`, with git turned on.
pub fn setup_luigi_with_git() -> Result<Storage<Project>> {
trace!("setup_luigi()");
let working = try!(::CONFIG.get_str("dirs/working").ok_or("Faulty config: dirs/working does not contain a value"));
let archive = try!(::CONFIG.get_str("dirs/archive").ok_or("Faulty config: dirs/archive does not contain a value"));
let templates = try!(::CONFIG.get_str("dirs/templates").ok_or("Faulty config: dirs/templates does not contain a value"));
let storage = try!(Storage::new_with_git(util::get_storage_path(), working, archive, templates));
Ok(storage)
}
pub fn simple_with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F)
where F:Fn(&Project)
{
match with_projects(dir, search_terms, |p| {f(p);Ok(())}){
Ok(_) => {},
Err(e) => error!("{}",e)
}
}
/// Helper method that passes projects matching the `search_terms` to the passed closure `f`
pub fn with_projects<F>(dir:StorageDir, search_terms:&[&str], f:F) -> Result<()>
where F:Fn(&Project)->Result<()>
{
trace!("with_projects({:?})", search_terms);
let luigi = try!(setup_luigi());
let projects = try!(luigi.search_projects_any(dir, search_terms));
if projects.is_empty() {
return Err(format!("Nothing found for {:?}", search_terms).into())
}
for project in &projects{
try!(f(project));
}
Ok(())
}
pub fn csv(year:i32) -> Result<String> {
let luigi = try!(setup_luigi());
let mut projects = try!(luigi.open_projects(StorageDir::Year(year)));
projects.sort_by(|pa,pb| pa.index().unwrap_or_else(||"zzzz".to_owned()).cmp( &pb.index().unwrap_or("zzzz".to_owned())));
projects_to_csv(&projects)
}
/// Produces a csv string from a list of `Project`s
/// TODO this still contains german terms
pub fn
|
(projects:&[Project]) -> Result<String>{
let mut string = String::new();
let splitter = ";";
try!(writeln!(&mut string, "{}", [ "Rnum", "Bezeichnung", "Datum", "Rechnungsdatum", "Betreuer", "Verantwortlich", "Bezahlt am", "Betrag", "Canceled"].join(splitter)));
for project in projects{
try!(writeln!(&mut string, "{}", [
project.get("InvoiceNumber").unwrap_or_else(|| String::from(r#""""#)),
project.get("Name").unwrap_or_else(|| String::from(r#""""#)),
project.get("event/dates/0/begin").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Caterers").unwrap_or_else(|| String::from(r#""""#)),
project.get("Responsible").unwrap_or_else(|| String::from(r#""""#)),
project.get("invoice/payed_date").unwrap_or_else(|| String::from(r#""""#)),
project.get("Final").unwrap_or_else(|| String::from(r#""""#)),
project.canceled_string().to_owned()
].join(splitter)));
}
Ok(string)
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn project_to_doc(project: &Project, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
let template_ext = ::CONFIG.get_str("extensions/output_template").expect("Faulty default config");
let output_ext = ::CONFIG.get_str("extensions/output_file").expect("Faulty default config");
let convert_ext = ::CONFIG.get_str("convert/output_extension").expect("Faulty default config");
let trash_exts = ::CONFIG.get("convert/trash_extensions") .expect("Faulty default config")
.as_vec().expect("Faulty default config")
.into_iter()
.map(|v|v.as_str()).collect::<Vec<_>>();
let mut template_path = PathBuf::new();
template_path.push(util::get_storage_path());
template_path.push(::CONFIG.get_str("dirs/templates").expect("Faulty config: dirs/templates does not contain a value"));
template_path.push(template_name);
template_path.set_extension(template_ext);
debug!("template file={:?} exists={}", template_path, template_path.exists());
if !template_path.exists() {
return Err(format!("Template not found at {}", template_path.display()).into())
}
let convert_tool = ::CONFIG.get_str("convert/tool");
let output_folder = ::CONFIG.get_str("output_path").and_then(util::get_valid_path).expect("Faulty config \"output_path\"");
let ready_for_offer = project.is_ready_for_offer();
let ready_for_invoice = project.is_ready_for_invoice();
let project_file = project.file();
// tiny little helper
let to_local_file = |file:&Path, ext| {
let mut _tmpfile = file.to_owned();
_tmpfile.set_extension(ext);
Path::new(_tmpfile.file_name().unwrap().into()).to_owned()
};
use BillType::*;
let (dyn_bill_type, outfile_tex):
(Option<BillType>, Option<PathBuf>) =
match (bill_type, ready_for_offer, ready_for_invoice)
{
(&Some(Offer), Ok(_), _ ) |
(&None, Ok(_), Err(_)) => (Some(Offer), Some(project.dir().join(project.offer_file_name(output_ext).expect("this should have been cought by ready_for_offer()")))),
(&Some(Invoice), _, Ok(_)) |
(&None, _, Ok(_)) => (Some(Invoice), Some(project.dir().join(project.invoice_file_name(output_ext).expect("this should have been cought by ready_for_invoice()")))),
(&Some(Offer), Err(e), _ ) => {error!("cannot create an offer, check out:{:#?}",e);(None,None)},
(&Some(Invoice), _, Err(e)) => {error!("cannot create an invoice, check out:{:#?}",e);(None,None)},
(_, Err(e), Err(_)) => {error!("Neither an Offer nor an Invoice can be created from this project\n please check out {:#?}", e);(None,None)}
};
//debug!("{:?} -> {:?}",(bill_type, project.is_ready_for_offer(), project.is_ready_for_invoice()), (dyn_bill_type, outfile_tex));
if let (Some(outfile), Some(dyn_bill)) = (outfile_tex, dyn_bill_type) {
let filled = try!(fill_template(project, &dyn_bill, &template_path));
let pdffile = to_local_file(&outfile, convert_ext);
let target = output_folder.join(&pdffile);
// ok, so apparently we can create a tex file, so lets do it
if !force && target.exists() && try!(file_age(&target)) < try!(file_age(&project_file)){
// no wait, nothing has changed, so lets save ourselves the work
info!("nothing to be done, {} is younger than {}\n use -f if you don't agree", target.display(), project_file.display());
} else {
// \o/ we created a tex file
if dry_run{
warn!("Dry run! This does not produce any output:\n * {}\n * {}", outfile.display(), pdffile.display());
} else {
let outfileb = try!(project.write_to_file(&filled,&dyn_bill,output_ext));
debug!("{} vs\n {}", outfile.display(), outfileb.display());
util::pass_to_command(&convert_tool, &[&outfileb]);
}
// clean up expected trash files
for trash_ext in trash_exts.iter().filter_map(|x|*x){
let trash_file = to_local_file(&outfile, trash_ext);
if trash_file.exists() {
try!(fs::remove_file(&trash_file));
debug!("just deleted: {}", trash_file.display())
}
else {
debug!("I expected there to be a {}, but there wasn't any ?", trash_file.display())
}
}
if pdffile.exists(){
debug!("now there is be a {:?} -> {:?}", pdffile, target);
try!(fs::rename(&pdffile, &target));
}
}
}
Ok(())
}
/// Creates the latex files within each projects directory, either for Invoice or Offer.
#[cfg(feature="document_export")]
pub fn projects_to_doc(dir:StorageDir, search_term:&str, template_name:&str, bill_type:&Option<BillType>, dry_run:bool, force:bool) -> Result<()> {
with_projects(dir, &[search_term], |p| project_to_doc(p, template_name, bill_type, dry_run, force) )
}
fn file_age(path:&Path) -> Result<time::Duration> {
let metadata = try!(fs::metadata(path));
let accessed = try!(metadata.accessed());
Ok(try!(accessed.elapsed()))
}
/// Testing only, tries
|
projects_to_csv
|
identifier_name
|