tgsrv.go
var chat *tgbotapi.Chat
json.Unmarshal(b, &chat)
if chat != nil {
subs := usersub("", chat.ID, true)
cmds := subs2cmds(subs)
var txt = strings.Replace(params.SubsHelp, "channelname", chanName, -1) + "\n\nList of urls of @" + chanName + ":\n\n"
for _, v := range cmds {
txt = txt + strings.Replace(v, "delete ", "", -1) + "\n"
}
msgList.Text = txt + "\n"
}
bot.Send(msgList)
}
}
default:
bot.Send(msgCancel)
}
} else {
//unknown cmd
bot.Send(msgCancel)
}
}
}
} else {
if update.Message == nil {
continue
}
switch update.Message.Text {
case "/start":
user := update.Message.From
if userNew(user) {
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.Hello)
m.DisableWebPagePreview = true
bot.Send(m)
} else {
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.SomeErr))
}
case "/top":
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.TopLinks)
m.DisableWebPagePreview = true
bot.Send(m)
case "/rateme":
m := tgbotapi.NewMessage(update.Message.Chat.ID, params.Rate)
m.DisableWebPagePreview = true
bot.Send(m)
case "/help":
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.Help))
case "/donate":
bot.Send(tgbotapi.NewMessage(update.Message.Chat.ID, params.Donate))
case "/channels":
var cmds = make(map[string]string)
cmds["channel_!_new"] = "new channel"
url := params.Channels + strconv.FormatInt(update.Message.Chat.ID, 10)
body := httputils.HttpGet(url, nil)
channels := make(map[int64]*tgbotapi.Chat)
json.Unmarshal(body, &channels)
for _, channel := range channels {
cmds["channel_!_delete_!_"+channel.UserName] = "delete @" + channel.UserName
cmds["channel_!_list_!_"+channel.UserName] = "list of urls of @" + channel.UserName
}
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Instruction: http://telegra.ph/telefeedbot-05-12\n\nYour channels:\n")
msg.DisableWebPagePreview = true
msg.ReplyMarkup = createButtons(cmds)
msg.ReplyToMessageID = update.Message.MessageID
bot.Send(msg)
case "/list":
//botYa.Track(update.Message.From.ID, nil, "list")
//var cmds = make(map[string]string)
//fmt.Printf("fromid:%d: %d\n", update.Message.From.ID, update.Message.Chat.ID)
subs := usersub("", int64(update.Message.From.ID), true)
//var s = "Subscriptions (send 'delete http://..' - for unsubscribe):\n"
cmds := subs2cmds(subs)
if len(cmds) == 0 {
m := tgbotapi.NewMessage(update.Message.Chat.ID, "No feeds..\n\n"+params.Hello)
m.DisableWebPagePreview = true
bot.Send(m)
} else {
msg := tgbotapi.NewMessage(update.Message.Chat.ID, "Subscriptions (press button bellow for unsubscribe):\n")
msg.ReplyMarkup = createButtons(cmds)
msg.ReplyToMessageID = update.Message.MessageID
bot.Send(msg)
}
case "/subs":
subs := usersub("", int64(update.Message.From.ID), true)
cmds := subs2cmds(subs)
msgList := tgbotapi.NewMessage(update.Message.Chat.ID, "")
var txt = "List of urls:\nSend delete url(s) for unsubscribe\n\n"
for _, v := range cmds {
txt = txt + strings.Replace(v, "delete ", "", -1) + "\n"
}
msgList.Text = txt + "\n"
bot.Send(msgList)
default:
msg := update.Message.Text
pubFind(update.Message, msg, int64(update.Message.From.ID))
}
}
}
}
func subs2cmds(subs map[string]bool) map[string]string {
var cmds = make(map[string]string)
for k := range subs {
log.Println(k)
if strings.Contains(k, params.PubNames) {
cmd := "delete https://vk.com/" + strings.Replace(k, params.PubNames, "", -1)
key := "delete" + k
cmds[key] = cmd
}
if strings.Contains(k, params.Feed) {
b := httputils.HttpGet(params.Api+k, nil)
if b != nil {
cmd := "delete " + string(b)
key := "delete" + k
cmds[key] = cmd
}
}
}
log.Println("cmds:", cmds)
return cmds
}
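// Sketch of the mapping built by subs2cmds (key prefixes come from params; the
// screen name "durov" is illustrative): a vk subscription stored under
// params.PubNames+"durov" yields the button command "delete https://vk.com/durov",
// while a feed subscription is resolved back to its URL via a GET on params.Api+key.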
func createButtons(buttonsCmds map[string]string) tgbotapi.InlineKeyboardMarkup {
var buttons [][]tgbotapi.InlineKeyboardButton
var keys []string
for k := range buttonsCmds {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
choice := buttonsCmds[k]
cleanedChoice := strings.TrimSpace(choice)
cleanedChoice = strings.Replace(cleanedChoice, "\n", "", -1)
button := tgbotapi.NewInlineKeyboardButtonData(cleanedChoice, k)
buttons = append(buttons, tgbotapi.NewInlineKeyboardRow(button))
}
buttonCancel := tgbotapi.NewInlineKeyboardButtonData("Cancel", "cancel")
buttons = append(buttons, tgbotapi.NewInlineKeyboardRow(buttonCancel))
buttonsRow := tgbotapi.NewInlineKeyboardMarkup(buttons...)
return buttonsRow
}
func userNew(user *tgbotapi.User) bool {
urlUsr := params.Users + strconv.Itoa(user.ID)
log.Println("userNew", urlUsr)
b, _ := json.Marshal(user)
httputils.HttpPut(params.UserName+user.UserName, nil, b)
res := httputils.HttpPut(urlUsr, nil, b)
//telefeedbot
if user.ID > 0 {
pubSubTgAdd(146445941, "telefeedbot", nil, false, int64(user.ID))
}
return res
}
func channelNew(chat *tgbotapi.Chat) bool {
url := params.Users + strconv.FormatInt(chat.ID, 10)
log.Println("channelNew", url)
b, _ := json.Marshal(chat)
httputils.HttpPut(params.UserName+chat.UserName, nil, b)
return httputils.HttpPut(url, nil, b)
}
func pubFind(msg *tgbotapi.Message, txt string, userid int64) {
log.Println("pubFind")
var delete = false
var tmp = strings.Replace(txt, "\n", " ", -1)
tmp = strings.Replace(tmp, "\r", "", -1)
tmp = strings.TrimSpace(tmp)
words := strings.Split(tmp, " ")
for i := range words {
var word = strings.TrimSpace(words[i])
if word == "delete" || word == "Delete" {
delete = true
continue
}
if strings.HasPrefix(word, "@") {
chanName := strings.Replace(word, "@", "", -1)
url := params.UserName + chanName
b := httputils.HttpGet(url, nil)
var chat *tgbotapi.Chat
json.Unmarshal(b, &chat)
if chat != nil {
userChannelsUrl := params.Channels + strconv.FormatInt(userid, 10)
userChannelsbody := httputils.HttpGet(userChannelsUrl, nil)
userChannels := make(map[int64]*tgbotapi.Chat)
json.Unmarshal(userChannelsbody, &userChannels)
if userChannels[chat.ID] != nil {
userid = chat.ID
} else {
bot.Send(tgbotapi.NewMessage(userid, chanName+" is not yours"))
}
}
continue
}
if strings.HasPrefix(word, "http") == false {
//default sheme is https
word = "https://" + word
}
urls, err := url.Parse(word)
if err != nil {
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, "Rss feed on domain:'"+word+"'\n"+params.NotFound+params.Example))
return
}
mainDomain, _ := publicsuffix.EffectiveTLDPlusOne(urls.Host)
switch mainDomain {
case "t.me":
parts := strings.Split(urls.Path, "/")
if len(parts) > 1 {
channelName := "@" + parts[len(parts)-1]
m := tgbotapi.NewMessageToChannel(channelName, "Ok")
m.DisableWebPagePreview = true
reply, err := bot.Send(m)
if err != nil {
s := err.Error()
if strings.Contains(s, "orbidden")
|
random_line_split
|
|
tgsrv.go
|
s...)
return buttonsRow
}
func userNew(user *tgbotapi.User) bool {
urlUsr := params.Users + strconv.Itoa(user.ID)
log.Println("userNew", urlUsr)
b, _ := json.Marshal(user)
httputils.HttpPut(params.UserName+user.UserName, nil, b)
res := httputils.HttpPut(urlUsr, nil, b)
//telefeedbot
if user.ID > 0 {
pubSubTgAdd(146445941, "telefeedbot", nil, false, int64(user.ID))
}
return res
}
func channelNew(chat *tgbotapi.Chat) bool {
url := params.Users + strconv.FormatInt(chat.ID, 10)
log.Println("channelNew", url)
b, _ := json.Marshal(chat)
httputils.HttpPut(params.UserName+chat.UserName, nil, b)
return httputils.HttpPut(url, nil, b)
}
func pubFind(msg *tgbotapi.Message, txt string, userid int64) {
log.Println("pubFind")
var delete = false
var tmp = strings.Replace(txt, "\n", " ", -1)
tmp = strings.Replace(tmp, "\r", "", -1)
tmp = strings.TrimSpace(tmp)
words := strings.Split(tmp, " ")
for i := range words {
var word = strings.TrimSpace(words[i])
if word == "delete" || word == "Delete" {
delete = true
continue
}
if strings.HasPrefix(word, "@") {
chanName := strings.Replace(word, "@", "", -1)
url := params.UserName + chanName
b := httputils.HttpGet(url, nil)
var chat *tgbotapi.Chat
json.Unmarshal(b, &chat)
if chat != nil {
userChannelsUrl := params.Channels + strconv.FormatInt(userid, 10)
userChannelsbody := httputils.HttpGet(userChannelsUrl, nil)
userChannels := make(map[int64]*tgbotapi.Chat)
json.Unmarshal(userChannelsbody, &userChannels)
if userChannels[chat.ID] != nil {
userid = chat.ID
} else {
bot.Send(tgbotapi.NewMessage(userid, chanName+" not yours"))
}
}
continue
}
if strings.HasPrefix(word, "http") == false {
//default sheme is https
word = "https://" + word
}
urls, err := url.Parse(word)
if err != nil {
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, "Rss feed on domain:'"+word+"'\n"+params.NotFound+params.Example))
return
}
mainDomain, _ := publicsuffix.EffectiveTLDPlusOne(urls.Host)
switch mainDomain {
case "t.me":
parts := strings.Split(urls.Path, "/")
if len(parts) > 1 {
channelName := "@" + parts[len(parts)-1]
m := tgbotapi.NewMessageToChannel(channelName, "Ok")
m.DisableWebPagePreview = true
reply, err := bot.Send(m)
if err != nil {
s := err.Error()
if strings.Contains(s, "orbidden") {
m := tgbotapi.NewMessage(msg.Chat.ID, "Add @telefeedbot as admin 2 channel: "+channelName)
bot.Send(m)
} else {
m := tgbotapi.NewMessage(msg.Chat.ID, s)
bot.Send(m)
}
} else {
channel := reply.Chat
addChannel(msg.Chat.ID, channel, false)
}
}
case "twitter.com":
parts := strings.Split(urls.Path, "/")
for _, part := range parts {
if part != "" {
findFeed("https://twitrss.me/twitter_user_to_rss/?user="+part, msg, delete, userid)
}
}
case "instagram.com":
parts := strings.Split(urls.Path, "/")
for _, part := range parts {
if part != "" {
findFeed("https://web.stagram.com/rss/n/"+part, msg, delete, userid)
}
}
case "vk.com":
parts := strings.Split(urls.Path, "/")
for j := range parts {
if parts[j] != "" {
domain := parts[j]
log.Println(domain)
groupDb := pubDbGet(domain)
if groupDb.Gid == 0 {
// public not found
groups := vkapi.GroupsGetById(domain)
if len(groups) > 0 {
// we have group
groupVk := groups[0]
// save group to DB
if pubDbSet(groupVk) {
// new group set
pubSubTgAdd(groupVk.Gid, groupVk.ScreenName, msg, delete, userid)
} else {
// group not set
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, "Error create domain:'"+domain+"'"))
}
} else {
// group not found
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, "Error vk domain:'"+domain+"'"+" not found"))
}
} else {
// public exists
pubSubTgAdd(groupDb.Gid, groupDb.ScreenName, msg, delete, userid)
}
}
}
default:
findFeed(word, msg, delete, userid)
}
}
}
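// Example message flows handled by pubFind above (values illustrative):
//   "delete https://example.com/feed"     -> unsubscribe the sender from that feed
//   "@mychannel https://example.com/feed" -> target the channel instead of the sender,
//                                            but only if the sender owns @mychannel
//   "twitter.com/someuser"                -> scheme defaults to https, then the domain
//                                            dispatch bridges it via a twitrss.me RSS feed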
func addChannel(userId int64, channel *tgbotapi.Chat, isDelete bool) {
if channel == nil {
return
}
url := params.Channels + strconv.FormatInt(userId, 10)
body := httputils.HttpGet(url, nil)
channels := make(map[int64]*tgbotapi.Chat)
json.Unmarshal(body, &channels)
// drop any existing entry, then re-add it unless this is a delete
delete(channels, channel.ID)
if !isDelete {
channels[channel.ID] = channel
}
log.Println("channels ", channels)
data, err := json.Marshal(channels)
if err == nil {
result := httputils.HttpPut(url, nil, data)
if result {
if isDelete {
bot.Send(tgbotapi.NewMessage(userId, "👍 Removed: "+channel.UserName+"\n\n"))
} else {
//add channel as User
if channelNew(channel) {
bot.Send(tgbotapi.NewMessage(userId, channel.UserName+" 👍\n\nUse /channels for the list of channels\n\nSend @"+
channel.UserName+" http://url to add a url to the channel"))
}
}
}
}
}
func findFeed(word string, msg *tgbotapi.Message, isDelete bool, userid int64) {
log.Println("word", word)
var feedlink = getFeedLink(word)
if feedlink == "" {
log.Println("feedlink", feedlink)
rss := rssExtract(word)
if rss != "" {
log.Println("rss", rss)
feedlink = getFeedLink(rss)
log.Println("feedlink", feedlink)
}
}
if feedlink != "" {
feedkey := GetMD5Hash(feedlink)
//create feed or overwrite
httputils.HttpPut(params.Feeds+feedkey, nil, []byte(feedlink))
feedSubTgAdd(feedlink, msg, isDelete, userid)
} else {
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, word+"\n"+params.NotFound))
}
}
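// Sketch of the lookup chain in findFeed (the helpers are defined elsewhere in this package):
//   getFeedLink(word) // word already points at a feed? use it directly
//   rssExtract(word)  // otherwise try to discover a feed URL on the page
//   getFeedLink(rss)  // and validate that candidate
// A resolved link is stored under its MD5 hex digest, which serves as the feed key everywhere else.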
func feedSubTgAdd(feedlink string, msg *tgbotapi.Message, isDelete bool, userid int64) {
url := params.FeedSubs + GetMD5Hash(feedlink)
log.Println("feedSubTgAdd", url)
body := httputils.HttpGet(url, nil)
users := make(map[int64]bool)
json.Unmarshal(body, &users)
delete(users, userid)
if !isDelete {
users[userid] = true
}
log.Println("feedSubTgAdd users ", users)
//user subs
usersub(params.Feed+GetMD5Hash(feedlink), userid, isDelete)
data, err := json.Marshal(users)
if err == nil {
log.Println("feedSubTgAdd data ", string(data))
result := httputils.HttpPut(url, nil, data)
if result {
if isDelete {
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, "👍 Removed: "+feedlink+"\n\n"))
} else {
bot.Send(tgbotapi.NewMessage(msg.Chat.ID, feedlink+" 👍\n\n"+
params.Psst))
}
}
}
}
func usersub(url string, userid int64, isDelete bool) map[string]bool {
suburl := params.UserSubs + strconv.FormatInt(userid, 10)
bodysub := httputils.HttpGet(suburl, nil)
subs := make(map[string]bool)
json.Unmarshal(bodysub, &subs)
delete(subs, url)
if !isDelete {
subs[url] = true
}
if url == "" {
return subs
}
bsubs, _ := json.Marshal(subs)
httputils.HttpPut(suburl, nil, bsubs)
return subs
}
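// Usage notes for usersub (behavior follows directly from the code above):
//   usersub("", uid, true)   // url == "": read-only, returns the stored set without writing
//   usersub(key, uid, false) // adds key to the user's set and persists it via HttpPut
//   usersub(key, uid, true)  // removes key from the set and persists it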
func GetMD5Hash(text string) string {
// body reconstructed as the standard MD5-hex helper (an assumption; the source is truncated here)
hash := md5.Sum([]byte(text))
return hex.EncodeToString(hash[:])
}
cfg.rs
/// The main configuration (leading fields reconstructed from the `Default` impl
/// below; the source fragment is truncated at this point)
pub struct Config {
pub auth: Auth,
pub default_host: Option<String>,
pub expire: Option<String>,
pub hosts: HashMap<String, Host>,
/// Length of prefix to use
pub prefix_length: u8,
/// Compute hash on remote side after upload to verify.
pub verify_via_hash: bool,
}
/// Authentication configuration
#[derive(Debug, Clone)]
pub struct Auth {
/// Try to use auth information for the given host from openssh settings
pub from_openssh: bool,
/// Perform interactive authentication (if a private key is set, the password will be used
/// for the private key instead).
pub interactive: bool,
/// Perform authentication via explicit private key
pub private_key_file: Option<String>,
/// Explicit password for private key (unsafe)
pub private_key_file_password: Option<String>,
/// Perform agent authentication
pub use_agent: bool,
}
/// A host entry
#[derive(Debug)]
pub struct Host {
/// Alias under which the host is known
pub alias: String,
/// Overwrite global authentication settings for this host.
pub auth: Auth,
/// Expire the uploaded file after the given amount of time via an `at`-scheduled remote job.
///
/// Durations can be: seconds (sec, s), minutes (min, m), days (d), weeks (w),
/// months (M) or years (y).
///
/// Minimum time till expiration is a minute.
///
/// Overrides the global setting.
pub expire: Option<String>,
/// In which folder we store files on the host.
pub folder: PathBuf,
/// In case files on the remote site need a special group setting in order to be
/// readable by the webserver.
pub group: Option<String>,
/// Self-explanatory (if not set, the alias will be used)
pub hostname: Option<String>,
/// If the user REALLY REALLY wants to, a plaintext password can be provided (but it is not
/// recommended!).
pub password: Option<String>,
/// Length of prefix to use
pub prefix_length: u8,
/// url-prefix to apply to the file link
pub url: String,
/// The user to sign in with; otherwise the ssh config will be used.
pub user: Option<String>,
}
fn default_config_directories() -> Vec<&'static str> {
vec!["~/.config/asfa", "/etc/asfa"]
}
pub fn load<T: AsRef<str> + Display>(path: &Option<T>) -> Result<Config> {
let possible_paths: Vec<&str> = match path {
Some(path) => vec![path.as_ref()],
None => default_config_directories(),
};
for path in possible_paths.iter() {
match Config::load(path)? {
None => continue,
Some(cfg) => return Ok(cfg),
}
}
bail!("Did not find valid configuration!");
}
#[allow(dead_code)]
pub fn dummy_host_str() -> &'static str {
include_str!("dummy_host.yml")
}
#[allow(dead_code)]
pub fn dummy_host() -> Host {
Host::from_yaml(
"dummy_host".to_string(),
&YamlLoader::load_from_str(dummy_host_str()).unwrap()[0],
)
.unwrap()
}
impl Default for Config {
fn default() -> Self {
Config {
auth: Auth::default(),
default_host: None,
expire: None,
hosts: HashMap::new(),
prefix_length: 32,
verify_via_hash: true,
}
}
}
impl Config {
pub fn load<T: AsRef<str> + Display>(dir: T) -> Result<Option<Config>> {
let config_dir = match expanduser(dir.as_ref()) {
Ok(p) => p,
Err(e) => {
bail!("Error when expanding path to config file: {}", e);
}
};
let global = {
let mut global = config_dir.clone();
global.push("config.yaml");
global
};
let raw: String = match read_to_string(&global) {
Err(e) => {
debug!(
"Could not read configuration file '{}', error: {}",
global.to_str().unwrap_or("invalid"),
e
);
return Ok(None);
}
Ok(raw) => raw,
};
let mut config = Self::from_yaml(&raw)?;
let hosts_dir = {
let mut hosts_dir = config_dir;
hosts_dir.push("hosts");
hosts_dir
};
if hosts_dir.is_dir() {
for entry in read_dir(&hosts_dir)? {
let possible_host = entry?.path();
match possible_host.extension() {
None => {
continue;
}
Some(ext) => {
if ext != "yaml" {
continue;
}
}
};
let alias = match possible_host.file_stem() {
None => {
warn!(
"Could not extract file stem for: {}",
possible_host.display()
);
continue;
}
Some(alias) => alias
.to_str()
.context("Could not convert host file name to String.")?
.to_string(),
};
if config.hosts.contains_key(&alias) {
bail!("Host {} configured in config.yaml and as host-file.", alias);
};
let host_yaml = YamlLoader::load_from_str(&read_to_string(&possible_host)?)?;
let error = format!("Invalid host-file for host {}", &alias);
let host =
Host::from_yaml_with_config(alias, &host_yaml[0], &config).context(error)?;
config.hosts.insert(host.alias.clone(), host);
}
}
Ok(Some(config))
}
pub fn from_yaml(input: &str) -> Result<Config> {
let documents = match YamlLoader::load_from_str(input) {
Ok(data) => data,
Err(e) => {
bail!("Error while loading config file: {}", e);
}
};
let mut config = Config::default();
let config_yaml = match &documents[0] {
Yaml::Hash(h) => h,
_ => {
bail!("Root object in configuration file is no dictionary!");
}
};
config.prefix_length = {
let length = get_int_from(config_yaml, "prefix_length")?
.cloned()
.unwrap_or(config.prefix_length as i64);
check_prefix_length(length)?;
length as u8
};
config.auth = if let Some(Yaml::Hash(auth)) = config_yaml.get(&yaml_string("auth")) {
match Auth::from_yaml(&auth, None) {
Ok(auth) => auth,
Err(e) => {
bail!("Could not read global authentication settings: {}", e);
}
}
} else {
config.auth
};
config.default_host =
std::env::var("ASFA_HOST")
.ok()
.or(get_string_from(config_yaml, "default_host")?.cloned());
config.expire = get_string_from(config_yaml, "expire")?.cloned();
config.verify_via_hash = get_bool_from(config_yaml, "verify_via_hash")?
.cloned()
.unwrap_or(config.verify_via_hash);
match config_yaml.get(&yaml_string("hosts")) {
Some(Yaml::Hash(dict)) => {
for entry in dict.clone().entries() {
let alias = match entry.key() {
Yaml::String(alias) => alias.to_string(),
invalid => {
warn!("Found invalid alias for host entry: {:?}", invalid);
continue;
}
};
let host_yaml = entry.get();
let host = Host::from_yaml_with_config(alias.clone(), host_yaml, &config)?;
config.hosts.insert(alias, host);
}
}
// Some(Yaml::Array(a)) => a,
Some(_) => {
bail!("'hosts' entry in config file needs to be dictionary mapping host-alias to configuration!");
}
None => {
debug!("No 'hosts'-entry in config file.");
}
};
Ok(config)
}
pub fn get_host<T: AsRef<str>>(&self, alias: Option<T>) -> Result<&Host> {
match alias
.as_ref()
.map(|a| a.as_ref())
.or_else(|| self.default_host.as_deref())
{
None => match self.hosts.len() {
0 => {
bail!("No hosts configured, define some!");
}
1 => Ok(self.hosts.values().next().unwrap()),
_ => {
bail!("More than one host entry defined but neither `default_host` set in config or --config given via command line.");
}
},
Some(alias) => Ok(self
.hosts
.get(alias)
.with_context(|| format!("Did not find alias: {}", alias))?),
}
}
}
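// A hedged example of the config.yaml shape implied by from_yaml above; the keys
// are exactly the ones the parser reads, the values are illustrative assumptions:
//
//   prefix_length: 32
//   verify_via_hash: true
//   default_host: my-server        # may also come from the ASFA_HOST env variable
//   expire: 7d
//   auth:
//     use_agent: true
//     interactive: true
//   hosts:
//     my-server:
//       url: https://files.example.org
//       folder: /var/www/files     # url and folder are the required host keys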
impl Host {
fn from_yaml(alias: String, input: &Yaml) -> Result<Host> {
Self::from_yaml_with_config(alias, input, &Config::default())
}
fn from_yaml_with_config(alias: String, input: &Yaml, config: &Config) -> Result<Host> {
log::trace!("Reading host: {}", alias);
if let Yaml::Hash(dict) = input {
let url = get_required(dict, "url", get_string_from)?.clone();
let hostname = get_string_from(dict, "hostname")?.cloned();
let user = get_string_from(dict, "user")?.cloned();
let expire = get_string_from(dict, "expire")?
.cloned()
.or_else(|| config.expire.clone());
let folder = expanduser(get_required(dict, "folder", get_string_from)?)?;
let group = get_string_from(dict, "group")?.cloned();
let auth = match get_dict_from(dict, "auth")? {
Some(auth) => Auth::from_yaml(auth, Some(&config.auth))?,
None => config.auth.clone(),
};
let prefix_length = match get_int_from(dict, "prefix_length")? {
Some(prefix) => {
check_prefix_length(*prefix)?;
*prefix as u8
}
None => config.prefix_length,
};
let password = get_string_from(dict, "password")?.cloned();
Ok(Host {
alias,
auth,
expire,
folder,
group,
hostname,
password,
prefix_length,
url,
user,
})
} else {
bail!("Invalid yaml data for Host-alias '{}'", alias);
}
}
pub fn get_url(&self, file: &str) -> Result<String> {
Ok(format!(
"{}/{}",
&self.url,
utf8_percent_encode(file, CONTROLS_ENHANCED)
))
}
}
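// Sketch of get_url (values illustrative): with url = "https://files.example.org"
// and file = "my report.pdf", the result would be
// "https://files.example.org/my%20report.pdf", assuming the CONTROLS_ENHANCED
// ascii set (defined elsewhere) escapes spaces.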
impl Auth {
fn from_yaml(dict: &Hash, default: Option<&Auth>) -> Result<Auth, InvalidYamlTypeError> {
let auth_default = Self::default();
let default = default.unwrap_or(&auth_default);
let use_agent = get_bool_from(dict, "use_agent")?
.cloned()
.unwrap_or(default.use_agent);
let interactive = get_bool_from(dict, "interactive")?
.cloned()
.unwrap_or(default.interactive);
let private_key_file = get_string_from(dict, "private_key_file")?
.cloned()
.or_else(|| default.private_key_file.clone());
let private_key_file_password = get_string_from(dict, "private_key_file_password")?
.cloned()
.or_else(|| default.private_key_file_password.clone());
let from_openssh = get_bool_from(dict, "from_openssh")?
.cloned()
.unwrap_or(default.from_openssh);
Ok(Auth {
from_openssh,
interactive,
private_key_file,
private_key_file_password,
use_agent,
})
}
}
impl Default for Auth {
fn default() -> Self {
Auth {
from_openssh: true,
interactive: true,
private_key_file: None,
private_key_file_password: None,
use_agent: true,
}
}
}
fn check_prefix_length(length: i64) -> Result<()> {
if !(8..=128).contains(&length) {
bail! {"Prefix needs to be between 8 and 128 characters."};
}
Ok(())
}
error.rs
// Signature reconstructed (the input type is assumed to be libc::c_int, matching
// the cast below); the match body is verbatim from the source.
impl From<libc::c_int> for ErrorType {
fn from(input: libc::c_int) -> Self {
match input as s2n_error_type::Type {
s2n_error_type::OK => ErrorType::NoError,
s2n_error_type::IO => ErrorType::IOError,
s2n_error_type::CLOSED => ErrorType::ConnectionClosed,
s2n_error_type::BLOCKED => ErrorType::Blocked,
s2n_error_type::ALERT => ErrorType::Alert,
s2n_error_type::PROTO => ErrorType::ProtocolError,
s2n_error_type::INTERNAL => ErrorType::InternalError,
s2n_error_type::USAGE => ErrorType::UsageError,
_ => ErrorType::UnknownErrorType,
}
}
}
enum Context {
InvalidInput,
MissingWaker,
Code(s2n_status_code::Type, Errno),
Application(Box<dyn std::error::Error + Send + Sync + 'static>),
}
pub struct Error(Context);
pub trait Fallible {
type Output;
fn into_result(self) -> Result<Self::Output, Error>;
}
impl Fallible for s2n_status_code::Type {
type Output = s2n_status_code::Type;
fn into_result(self) -> Result<Self::Output, Error> {
if self >= s2n_status_code::SUCCESS {
Ok(self)
} else {
Err(Error::capture())
}
}
}
impl Fallible for isize {
type Output = usize;
fn into_result(self) -> Result<Self::Output, Error> {
// Negative values can't be converted to a real size
// and instead indicate an error.
self.try_into().map_err(|_| Error::capture())
}
}
impl Fallible for u64 {
type Output = Self;
/// Converts a u64 to a Result by checking for u64::MAX.
///
/// If a method that returns an unsigned int is fallible,
/// then the -1 error result wraps around to u64::MAX.
///
/// For a u64 to be Fallible, a result of u64::MAX must not be
/// possible without an error. For example, [`s2n_connection_get_delay`]
/// can't return u64::MAX as a valid result because
/// s2n-tls blinding delays are limited to 30s, or a return value of 3*10^10 ns,
/// which is significantly less than u64::MAX. [`s2n_connection_get_delay`]
/// would therefore only return u64::MAX for a -1 error result.
fn into_result(self) -> Result<Self::Output, Error> {
if self != Self::MAX {
Ok(self)
} else {
Err(Error::capture())
}
}
}
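// Sketch of the sentinel rule above (not a unit test: Error::capture() needs the
// underlying s2n library at runtime):
//   assert!(42u64.into_result().is_ok());     // anything below u64::MAX is a valid value
//   assert!(u64::MAX.into_result().is_err()); // the wrapped -1 becomes an error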
impl<T> Fallible for *mut T {
type Output = NonNull<T>;
fn into_result(self) -> Result<Self::Output, Error> {
if let Some(value) = NonNull::new(self) {
Ok(value)
} else {
Err(Error::capture())
}
}
}
impl<T> Fallible for *const T {
type Output = *const T;
fn into_result(self) -> Result<Self::Output, Error> {
if !self.is_null() {
Ok(self)
} else {
Err(Error::capture())
}
}
}
pub trait Pollable {
type Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>>;
}
impl<T: Fallible> Pollable for T {
type Output = T::Output;
fn into_poll(self) -> Poll<Result<Self::Output, Error>> {
match self.into_result() {
Ok(r) => Ok(r).into(),
Err(err) if err.is_retryable() => Poll::Pending,
Err(err) => Err(err).into(),
}
}
}
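// Sketch of driving a Fallible FFI call through Pollable (ffi_read is hypothetical):
//   match ffi_read().into_poll() {
//       Poll::Ready(Ok(n)) => { /* success */ }
//       Poll::Ready(Err(e)) => { /* hard failure */ }
//       Poll::Pending => { /* retryable (ErrorType::Blocked): try again later */ }
//   }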
impl Error {
pub(crate) const INVALID_INPUT: Error = Self(Context::InvalidInput);
pub(crate) const MISSING_WAKER: Error = Self(Context::MissingWaker);
/// Converts an io::Error into an s2n-tls Error
pub fn io_error(err: std::io::Error) -> Error {
let errno = err.raw_os_error().unwrap_or(1);
errno::set_errno(errno::Errno(errno));
s2n_status_code::FAILURE.into_result().unwrap_err()
}
/// An error occurred while running application code.
///
/// Can be emitted from [`crate::callbacks::ConnectionFuture::poll()`] to indicate
/// async task failure.
pub fn application(error: Box<dyn std::error::Error + Send + Sync + 'static>) -> Self {
Self(Context::Application(error))
}
fn capture() -> Self {
unsafe {
let s2n_errno = s2n_errno_location();
let code = *s2n_errno;
// https://github.com/aws/s2n-tls/blob/main/docs/USAGE-GUIDE.md#error-handling
//# To avoid possible confusion, s2n_errno should be cleared after processing
//# an error: s2n_errno = S2N_ERR_T_OK
*s2n_errno = s2n_error_type::OK as _;
Self(Context::Code(code, errno()))
}
}
pub fn name(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "InvalidInput",
Context::MissingWaker => "MissingWaker",
Context::Application(_) => "ApplicationError",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror_name(code))
},
}
}
pub fn message(&self) -> &'static str {
match self.0 {
Context::InvalidInput => "A parameter was incorrect",
Context::MissingWaker => {
"Tried to perform an asynchronous operation without a configured waker"
}
Context::Application(_) => "An error occurred while executing application code",
Context::Code(code, _) => unsafe {
// Safety: we assume the string has a valid encoding coming from s2n
cstr_to_str(s2n_strerror(code, core::ptr::null()))
},
}
}
pub fn debug(&self) -> Option<&'static str> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => unsafe {
let debug_info = s2n_strerror_debug(code, core::ptr::null());
// The debug string should be set to a constant static string
// when an error occurs, but because it starts out as NULL
// we should defend against mistakes.
if debug_info.is_null() {
None
} else {
// If the string is not null, then we can assume that
// it is constant and static.
Some(cstr_to_str(debug_info))
}
},
}
}
pub fn kind(&self) -> ErrorType {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorType::UsageError,
Context::Application(_) => ErrorType::Application,
Context::Code(code, _) => unsafe { ErrorType::from(s2n_error_get_type(code)) },
}
}
pub fn source(&self) -> ErrorSource {
match self.0 {
Context::InvalidInput | Context::MissingWaker => ErrorSource::Bindings,
Context::Application(_) => ErrorSource::Application,
Context::Code(_, _) => ErrorSource::Library,
}
}
#[allow(clippy::borrowed_box)]
/// Returns an [`std::error::Error`] if the error source was [`ErrorSource::Application`],
/// otherwise returns None.
pub fn application_error(&self) -> Option<&Box<dyn std::error::Error + Send + Sync + 'static>> {
if let Self(Context::Application(err)) = self {
Some(err)
} else {
None
}
}
pub fn is_retryable(&self) -> bool {
matches!(self.kind(), ErrorType::Blocked)
}
}
#[cfg(feature = "quic")]
impl Error {
/// s2n-tls does not send specific errors.
///
/// However, we can attempt to map local errors into the alerts
/// that we would have sent if we sent alerts.
///
/// This API is currently incomplete and should not be relied upon.
pub fn alert(&self) -> Option<u8> {
match self.0 {
Context::InvalidInput | Context::MissingWaker | Context::Application(_) => None,
Context::Code(code, _) => {
let mut alert = 0;
let r = unsafe { s2n_error_get_alert(code, &mut alert) };
match r.into_result() {
Ok(_) => Some(alert),
Err(_) => None,
}
}
}
}
}
/// # Safety
///
/// The caller must ensure the char pointer must contain a valid
/// UTF-8 string from a trusted source
unsafe fn cstr_to_str(v: *const c_char) -> &'static str {
let slice = CStr::from_ptr(v);
let bytes = slice.to_bytes();
core::str::from_utf8_unchecked(bytes)
}
impl TryFrom<std::io::Error> for Error {
type Error = Error;
fn try_from(value: std::io::Error) -> Result<Self, Self::Error> {
let io_inner = value.into_inner().ok_or(Error::INVALID_INPUT)?;
io_inner
.downcast::<Self>()
.map(|error| *error)
.map_err(|_| Error::INVALID_INPUT)
}
}
impl From<Error> for std::io::Error {
fn from(input: Error) -> Self {
if let Context::Code(_, errno) = input.0 {
if ErrorType::IOError == input.kind() {
let bare = std::io::Error::from_raw_os_error(errno.0);
return std::io::Error::new(bare.kind(), input);
}
}
std::io::Error::new(std::io::ErrorKind::Other, input)
}
}
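// Sketch of the io::Error round trip above (err is a hypothetical s2n-tls Error):
//   let io: std::io::Error = err.into(); // an IOError keeps its errno-derived kind
//   let back = Error::try_from(io);      // and can be recovered via downcast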
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = f.debug_struct("Error");
if let Context::Code(code, _) = self.0 {
s.field("code", &code);
}
s.field("name", &self.name());
s.field("message", &self.message());
s.field("kind", &self.kind());
s.field("source", &self.source());
// closing of Debug::fmt reconstructed; the source is truncated after `if let Some`
if let Some(debug) = self.debug() {
s.field("debug", &debug);
}
s.finish()
}
}
_utils.py
class Subscriptable(Generic[U, V]):
"""
Decorator to make a subscriptable instance from a __getitem__ function
Usage:
@Subscriptable
def my_subscriptable(key):
return key
assert my_subscriptable[8] == 8
"""
__slots__ = ("_func",)
def __init__(self, func: Callable[[U], V]) -> None:
self._func = func
# self.__doc__ = func.__doc__
def __call__(self):
raise SyntaxError("Use brackets '[]' instead")
def __getitem__(self, arg: U) -> V:
return self._func(arg)
def docstring(
docstring: str = None, *, pre: str = None, post: str = None
) -> Callable[[U], U]:
"""
Decorator to modify the docstring of an object.
For all provided strings, unused empty lines are removed, and the indentation
of the first non-empty line is removed from all lines if possible. This allows
better indentation when used as a decorator.
Unused empty lines means initial empty lines for ``pre``, and final empty lines
for ``post``.
Arguments:
docstring: replaces the docstring of the object
pre: adds the string at the start of the object original docstring
post: adds the strings at the end of the object original docstring
"""
def edit_docstring(obj: U) -> U:
obj.__doc__ = "".join(
(
clean_docstring(pre or "", unused="pre"),
clean_docstring(docstring or (obj.__doc__ or "")),
clean_docstring(post or "", unused="post"),
)
)
return obj
return edit_docstring
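# Example usage (illustrative):
# @docstring(pre="Deprecated: use new_f() instead.\n")
# def f():
#     """Original documentation."""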
# from https://stackoverflow.com/questions/5189699/how-to-make-a-class-property
class ClassPropertyDescriptor:
"""
Descriptor for class properties
"""
__slots__ = ("fget", "fset")
def __init__(
self,
fget: Union[classmethod, staticmethod],
fset: Union[classmethod, staticmethod] = None,
):
self.fget = fget
self.fset = fset
def __get__(self, obj: U, cls: Type[U] = None) -> V:
if cls is None:
cls = type(obj)
return self.fget.__get__(obj, cls)()
def __set__(self, obj: U, value: V):
if not self.fset:
raise AttributeError("can't set attribute")
type_ = type(obj)
return self.fset.__get__(obj, type_)(value)
@property
def __isabstractmethod__(self):
return any(
getattr(f, "__isabstractmethod__", False) for f in (self.fget, self.fset)
)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(
func: Union[Callable, classmethod, staticmethod]
) -> ClassPropertyDescriptor:
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
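# A minimal usage sketch of classproperty (hypothetical class, not part of
# the original module); the descriptor is looked up on the class itself:
class _Config:
    _name = "default"

    @classproperty
    def name(cls):
        return cls._name

assert _Config.name == "default"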
@overload
def autoformat(
cls: None, /, params: Union[str, Iterable[str]] = ("message", "msg")
) -> Callable[[Type[U]], Type[U]]:
...
@overload
def autoformat(
cls: Type[U], /, params: Union[str, Iterable[str]] = ("message", "msg")
) -> Type[U]:
...
def autoformat(
cls: Type[U] = None,
/,
params: Union[str, Iterable[str]] = ( # pylint: disable=unsubscriptable-object
"message",
"msg",
),
):
"""
Class decorator to autoformat string arguments in the __init__ method
Modifies the class __init__ method in place by wrapping it. The wrapped class
will call the format() method of the arguments specified in `params` that exist
in the original signature, passing all other arguments as a dictionary to
str.format()
Arguments:
params -- names of the arguments to autoformat
Usage:
@autoformat
class MyException(Exception):
def __init__(self, elem, msg="{elem} is invalid"):
super().__init__(msg)
self.msg = msg
self.elem = elem
assert MyException(8).msg == "8 is invalid"
"""
if isinstance(params, str):
params = (params,)
if cls is None:
return functools.partial(autoformat, params=params)
orig_init = cls.__init__
signature = inspect.signature(orig_init)
params = signature.parameters.keys() & set(params)
@functools.wraps(orig_init)
def init(*args, **kwargs):
bounds = signature.bind(*args, **kwargs)
bounds.apply_defaults()
pre_formatted = {
name: bounds.arguments.pop(name)
for name in params
if name in bounds.arguments
}
formatted = {
name: string.format(**bounds.arguments)
for name, string in pre_formatted.items()
}
for name, arg in formatted.items():
bounds.arguments[name] = arg
return orig_init(*bounds.args, **bounds.kwargs)
# init.__signature__ = signature
setattr(cls, "__init__", init)
return cls
class Variable(str):
def __repr__(self) -> str:
return self
def __str__(self) -> str:
return self
def _sig_without(sig: inspect.Signature, param: Union[int, str]) -> inspect.Signature:
"""Removes a parameter from a Signature object
If param is an int, remove the parameter at that position, else
remove any parameter with that name
"""
if isinstance(param, int):
params = list(sig.parameters.values())
params.pop(param)
else:
params = [p for name, p in sig.parameters.items() if name != param]
return sig.replace(parameters=params)
def _sig_merge(lsig: inspect.Signature, rsig: inspect.Signature) -> inspect.Signature:
"""Merges two signature object, dropping the return annotations"""
return inspect.Signature(
sorted(
list(lsig.parameters.values()) + list(rsig.parameters.values()),
key=lambda param: param.kind,
)
)
def _sig_to_def(sig: inspect.Signature) -> str:
return str(sig).split("->", 1)[0].strip()[1:-1]
def _sig_to_call(sig: inspect.Signature) -> str:
l = []
for p in sig.parameters.values():
if p.kind is inspect.Parameter.KEYWORD_ONLY:
l.append(f"{p.name}={p.name}")
else:
l.append(p.name)
return ", ".join(l)
def post_init(cls: Type[U]) -> Type[U]:
"""
Class decorator to automatically support __post_init__() on classes
This is useful for @attr.s decorated classes, because __attr_post_init__() doesn't
support additional arguments.
This decorator wraps the class __init__ in a new function that accepts merged arguments,
and dispatches them to __init__ and then __post_init__()
"""
if not isinstance(cls, type):
raise TypeError("Can only decorate classes")
if not hasattr(cls, "__post_init__"):
raise TypeError("The class must have a __post_init__() method")
# Ignore the first argument which is the "self" argument
sig = init_sig = _sig_without(inspect.signature(cls.__init__), 0)
previous = [(cls, "__init__", sig)]
for parent in reversed(cls.__mro__):
if hasattr(parent, "__post_init__"):
post_sig = _sig_without(
inspect.signature(getattr(parent, "__post_init__")), 0
)
try:
sig = _sig_merge(sig, post_sig)
except Exception as err:
# find the incompatibility
for parent, method, psig in previous:
try:
_sig_merge(psig, post_sig)
except Exception:
break
else:
raise TypeError(
"__post_init__ signature is incompatible with the class"
) from err
raise TypeError(
f"__post_init__() is incompatible with {parent.__qualname__}{method}()"
) from err
# No exception
previous.append((parent, "__post_init__", post_sig))
# handles type annotations and defaults
# inspired by the dataclasses module
params = list(sig.parameters.values())
localns = (
{
f"__type_{p.name}": p.annotation
for p in params
if p.annotation is not inspect.Parameter.empty
}
| {
f"__default_{p.name}": p.default
for p in params
if p.default is not inspect.Parameter.empty
}
|
"""
Mixin class to make a class non-inheritable and non-instantiable
"""
__slots__ = ("__weakref__",)
def __init__(self):
raise TypeError("Cannot instanciate virtual class")
def __init_subclass__(cls, *args, **kwargs):
if Virtual not in cls.__bases__:
raise TypeError("Cannot subclass virtual class")
super().__init_subclass__(*args, **kwargs)
|
identifier_body
|
|
_utils.py
|
_)(value)
@property
def __isabstractmethod__(self):
return any(
getattr(f, "__isabstractmethod__", False) for f in (self.fget, self.fset)
)
def setter(self, func):
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
self.fset = func
return self
def classproperty(
func: Union[Callable, classmethod, staticmethod]
) -> ClassPropertyDescriptor:
if not isinstance(func, (classmethod, staticmethod)):
func = classmethod(func)
return ClassPropertyDescriptor(func)
@overload
def autoformat(
cls: None, /, params: Union[str, Iterable[str]] = ("message", "msg")
) -> Callable[[Type[U]], Type[U]]:
...
@overload
def autoformat(
cls: Type[U], /, params: Union[str, Iterable[str]] = ("message", "msg")
) -> Type[U]:
...
def autoformat(
cls: Type[U] = None,
/,
params: Union[str, Iterable[str]] = ( # pylint: disable=unsubscriptable-object
"message",
"msg",
),
):
"""
Class decorator to autoformat string arguments in the __init__ method
Modifies the class __init__ method in place by wrapping it. The wrapped class
will call the format() method of the arguments specified in `params` that exist
in the original signature, passing all other arguments as a dictionary to
str.format()
Arguments:
params -- names of the arguments to autoformat
Usage:
@autoformat
class MyException(Exception):
def __init__(self, elem, msg="{elem} is invalid"):
super().__init__(msg)
self.msg = msg
self.elem = elem
assert MyException(8).msg == "8 is invalid"
"""
if isinstance(params, str):
params = (params,)
if cls is None:
return functools.partial(autoformat, params=params)
orig_init = cls.__init__
signature = inspect.signature(orig_init)
params = signature.parameters.keys() & set(params)
@functools.wraps(orig_init)
def init(*args, **kwargs):
bounds = signature.bind(*args, **kwargs)
bounds.apply_defaults()
pre_formatted = {
name: bounds.arguments.pop(name)
for name in params
if name in bounds.arguments
}
formatted = {
name: string.format(**bounds.arguments)
for name, string in pre_formatted.items()
}
for name, arg in formatted.items():
bounds.arguments[name] = arg
return orig_init(*bounds.args, **bounds.kwargs)
# init.__signature__ = signature
setattr(cls, "__init__", init)
return cls
class Variable(str):
def __repr__(self) -> str:
return self
def __str__(self) -> str:
return self
def _sig_without(sig: inspect.Signature, param: Union[int, str]) -> inspect.Signature:
"""Removes a parameter from a Signature object
If param is an int, remove the parameter at that position, else
remove any parameter with that name
"""
if isinstance(param, int):
params = list(sig.parameters.values())
params.pop(param)
else:
params = [p for name, p in sig.parameters.items() if name != param]
return sig.replace(parameters=params)
def _sig_merge(lsig: inspect.Signature, rsig: inspect.Signature) -> inspect.Signature:
"""Merges two signature object, dropping the return annotations"""
return inspect.Signature(
sorted(
list(lsig.parameters.values()) + list(rsig.parameters.values()),
key=lambda param: param.kind,
)
)
def _sig_to_def(sig: inspect.Signature) -> str:
return str(sig).split("->", 1)[0].strip()[1:-1]
def _sig_to_call(sig: inspect.Signature) -> str:
l = []
for p in sig.parameters.values():
if p.kind is inspect.Parameter.KEYWORD_ONLY:
l.append(f"{p.name}={p.name}")
else:
l.append(p.name)
return ", ".join(l)
def post_init(cls: Type[U]) -> Type[U]:
"""
Class decorator to automatically support __post_init__() on classes
This is useful for @attr.s decorated classes, because __attr_post_init__() doesn't
support additional arguments.
This decorator wraps the class __init__ in a new function that accepts merged arguments,
and dispatches them to __init__ and then __post_init__()
"""
if not isinstance(cls, type):
raise TypeError("Can only decorate classes")
if not hasattr(cls, "__post_init__"):
raise TypeError("The class must have a __post_init__() method")
# Ignore the first argument which is the "self" argument
sig = init_sig = _sig_without(inspect.signature(cls.__init__), 0)
previous = [(cls, "__init__", sig)]
for parent in reversed(cls.__mro__):
if hasattr(parent, "__post_init__"):
post_sig = _sig_without(
inspect.signature(getattr(parent, "__post_init__")), 0
)
try:
sig = _sig_merge(sig, post_sig)
except Exception as err:
# find the incompatibility
for parent, method, psig in previous:
try:
_sig_merge(psig, post_sig)
except Exception:
break
else:
raise TypeError(
"__post_init__ signature is incompatible with the class"
) from err
raise TypeError(
f"__post_init__() is incompatible with {parent.__qualname__}{method}()"
) from err
# No exception
previous.append((parent, "__post_init__", post_sig))
# handles type annotations and defaults
# inspired by the dataclasses module
params = list(sig.parameters.values())
localns = (
{
f"__type_{p.name}": p.annotation
for p in params
if p.annotation is not inspect.Parameter.empty
}
| {
f"__default_{p.name}": p.default
for p in params
if p.default is not inspect.Parameter.empty
}
| cls.__dict__
)
for i, p in enumerate(params):
if p.default is not inspect.Parameter.empty:
p = p.replace(default=Variable(f"__default_{p.name}"))
if p.annotation is not inspect.Parameter.empty:
p = p.replace(annotation=f"__type_{p.name}")
params[i] = p
new_sig = inspect.Signature(params)
# Build the new __init__ source code
self_ = "self" if "self" not in sig.parameters else "__post_init_self"
init_lines = [
f"def __init__({self_}, {_sig_to_def(new_sig)}) -> None:",
f"__original_init({self_}, {_sig_to_call(init_sig)})",
]
for parent, method, psig in previous[1:]:
if hasattr(parent, "__post_init__"):
if parent is not cls:
init_lines.append(
f"super({parent.__qualname__}, {self_}).{method}({_sig_to_call(psig)})"
)
else:
init_lines.append(f"{self_}.{method}({_sig_to_call(psig)})")
init_src = "\n ".join(init_lines)
# Build the factory function source code
local_vars = ", ".join(localns.keys())
factory_src = (
f"def __make_init__(__original_init, {local_vars}):\n"
f" {init_src}\n"
" return __init__"
)
# Create new __init__ with the factory
globalns = inspect.getmodule(cls).__dict__
ns: dict[str, Any] = {}
exec(factory_src, globalns, ns)
init = ns["__make_init__"](cls.__init__, **localns)
self_param = inspect.Parameter(self_, inspect.Parameter.POSITIONAL_ONLY)
init.__signature__ = inspect.Signature(
parameters=[self_param] + list(sig.parameters.values()), return_annotation=None
)
setattr(cls, "__init__", init)
return cls
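# A minimal usage sketch of post_init (hypothetical class, not part of the
# original module). The generated __init__ accepts the merged arguments of
# __init__ and __post_init__; note that the dict unions above also require a
# Python version where mappingproxy supports "|":
@post_init
class _Scaled:
    def __init__(self, x):
        self.x = x

    def __post_init__(self, factor=2):
        self.x *= factor

assert _Scaled(3).x == 6
assert _Scaled(3, factor=5).x == 15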
@contextmanager
def on_error(func, *args, yield_=None, **kwargs):
"""
Context manager that calls a function if the managed code raises an exception
"""
try:
yield yield_
except Exception:
func(*args, **kwargs)
raise
@contextmanager
def on_success(func, *args, yield_=None, **kwargs):
"""
Context manager that calls a function if the managed code doesn't raise
"""
try:
yield yield_
except Exception:
raise
else:
func(*args, **kwargs)
def clean_docstring(doc: str, unused: Literal["pre", "post"] = None) -> str:
"""
Removes initial empty lines and shared indentation
Arguments:
doc: docstring to clean up
unused: whether to remove starting or ending empty lines
Returns:
The cleaned docstring
"""
doc = doc.split("\n")
if unused == "pre":
|
try:
index = next(i for i, l in enumerate(doc) if l.strip())
doc = doc[index:]
except StopIteration:
doc = []
|
conditional_block
|
|
monotone_stack.go
|
/)
- [2104. Sum of Subarray Ranges](https://leetcode.cn/problems/sum-of-subarray-ranges/)
- [2281. Sum of Total Strength of Wizards](https://leetcode.cn/problems/sum-of-total-strength-of-wizards/)
- [2818. Apply Operations to Maximize Score](https://leetcode.cn/problems/apply-operations-to-maximize-score/)
Template problems
https://www.luogu.com.cn/problem/P5788
https://www.luogu.com.cn/problem/P2866 http://poj.org/problem?id=3250
NEERC05,UVa 1619 https://onlinejudge.org/index.php?option=com_onlinejudge&Itemid=8&category=825&page=show_problem&problem=4494
Transformation https://codeforces.com/problemset/problem/280/B
Transformation LC2289 https://leetcode.cn/problems/steps-to-make-array-non-decreasing/
max >= sum https://codeforces.com/problemset/problem/1691/D
LC1124 https://leetcode.cn/problems/longest-well-performing-interval/
What idea does the monotonic stack teach you? LC1944 https://leetcode.cn/problems/number-of-visible-people-in-a-queue/
Next-next greater element LC2454 https://leetcode.cn/problems/next-greater-element-iv/
- Application https://atcoder.jp/contests/abc140/tasks/abc140_e
max(minimum * subarray sum) LC1856 https://leetcode.cn/problems/maximum-subarray-min-product/
Lexicographically smallest
LC316 https://leetcode.cn/problems/remove-duplicate-letters/
- Extension: each character repeated at most limit times https://leetcode.cn/contest/tianchi2022/problems/ev2bru/
LC402 https://leetcode.cn/problems/remove-k-digits/
LC321 https://leetcode.cn/problems/create-maximum-number/
Computing contributions (the sum of ... over all subarrays)
Minimum LC907 https://leetcode.cn/problems/sum-of-subarray-minimums/
Maximum minus minimum LC2104 https://leetcode.cn/problems/sum-of-subarray-ranges/
Minimum times sum LC2281 https://leetcode.cn/problems/sum-of-total-strength-of-wizards/
Second largest https://atcoder.jp/contests/abc140/tasks/abc140_e
Combined with DP
https://codeforces.com/problemset/problem/5/E
https://codeforces.com/problemset/problem/1313/C2
https://codeforces.com/problemset/problem/1407/D
Combined with a segment tree, or cleverly maintaining extrema inside the monotonic stack https://codeforces.com/problemset/problem/1483/C
Case analysis on the maximum LC1335 https://leetcode.cn/problems/minimum-difficulty-of-a-job-schedule/
LC2355 https://leetcode.cn/problems/maximum-number-of-books-you-can-take/
Others
LC42 Trapping Rain Water https://leetcode-cn.com/problems/trapping-rain-water/
Note: Trapping Rain Water has three different solutions (DP, monotonic stack, and two pointers); the two-pointer solution is a space-optimized form of the DP. Explained at https://www.bilibili.com/video/BV1Qg411q7ia/
Essentially two counting strategies: compute the water above each index (accumulate vertically), or compute the width of water at each height band (accumulate horizontally)
LC84 Largest Rectangle in Histogram https://leetcode-cn.com/problems/largest-rectangle-in-histogram/ http://poj.org/problem?id=2559 http://poj.org/problem?id=2082
LC85 largest all-ones rectangle (implemented below as maximalRectangleArea) https://leetcode-cn.com/problems/maximal-rectangle/ original problem: http://poj.org/problem?id=3494
LC1504 number of all-ones submatrices (implemented below as numSubmat) https://leetcode-cn.com/problems/count-submatrices-with-all-ones/
LC768 https://leetcode.cn/problems/max-chunks-to-make-sorted-ii/
LC2735 https://leetcode.cn/problems/collecting-chocolates/solutions/2305119/xian-xing-zuo-fa-by-heltion-ypdx/
LC2736 https://leetcode.cn/problems/maximum-sum-queries/
Suffix array + sum of counts over distinct rectangles https://codeforces.com/edu/course/2/lesson/2/5/practice/contest/269656/problem/D
Combined with bitOpTrickCnt (see bits.go) https://codeforces.com/problemset/problem/875/D
Recovering all of right from a partial right; recovering a from right https://codeforces.com/problemset/problem/1158/C
*/
func monotoneStack(a []int) ([]int, []int) {
const mod int = 1e9 + 7
// Looking at local minima
// If equal elements exist, change the comparison on one side of the loop to "less than or equal"
// left[i] is the nearest position to the left whose value is strictly less than a[i], so a[i] is (one of) the minimum of the interval [left[i]+1,i]
// If we instead look for "less than or equal" on the left, a[i] is the unique minimum of [left[i]+1,i]
// left[i] = -1 when no such position exists
// Although this is a nested loop, each element is pushed and popped at most once, so the whole loop runs in O(n)
n := len(a)
left := make([]int, n)
st := []int{-1} // bottom-of-stack sentinel, so left[i] can be set to -1 directly when the stack is empty
for i, v := range a {
// nearest left < v : pop >=
// nearest left <= v : pop >
// nearest left > v : pop <=
// nearest left >= v : pop <
for len(st) > 1 && a[st[len(st)-1]] >= v { // the comparison here is the reverse of the one being asked for
st = st[:len(st)-1]
}
// keep popping elements >= v; when the loop ends, the top of the stack is < v
left[i] = st[len(st)-1]
st = append(st, i)
}
// right[i] is the nearest position to the right whose value is strictly less than a[i], so a[i] is (one of) the minimum of the interval [i,right[i]-1]
// If we instead look for "less than or equal" on the right, a[i] is the unique minimum of [i,right[i]-1]
// right[i] = n when no such position exists
right := make([]int, n)
st = []int{n}
for i := n - 1; i >= 0; i-- {
v := a[i]
for len(st) > 1 && a[st[len(st)-1]] >= v { // same as above
st = st[:len(st)-1]
}
right[i] = st[len(st)-1]
st = append(st, i)
}
sum := make([]int, n+1) // int64
for i, v := range a {
sum[i+1] = (sum[i] + v) % mod
}
// EXTRA: computing contributions (take care to avoid negative values when applying the modulus)
for i, v := range a {
_ = v
//l, r := left[i]+1, right[i] // [l,r) half-open interval
tot := (i - left[i]) * (right[i] - i)
_ = tot
//tot := (sum[r] + mod - sum[l]) % mod
}
{
// TIPS: if one side is defined with "less than or equal", left and right can both be computed in a single pass
left := make([]int, n)
right := make([]int, n)
|
}
for i, v := range a {
sz := right[i] - left[i] - 1
if v > ans
|
for i := range right {
right[i] = n
}
st := []int{-1}
for i, v := range a {
for len(st) > 1 && a[st[len(st)-1]] >= v { // here right is the "less than or equal" version
right[st[len(st)-1]] = i
st = st[:len(st)-1]
}
left[i] = st[len(st)-1]
st = append(st, i)
}
}
// EXTRA: for every length i, find the maximum of the minimums over all subarrays of length i
// https://codeforces.com/problemset/problem/547/B LC1950 https://leetcode-cn.com/problems/maximum-of-minimum-values-in-all-subarrays/
{
ans := make([]int, n+1)
for i := range ans {
ans[i] = -2e9
|
identifier_body
|
monotone_stack.go
|
}
}
// EXTRA: for every length i, find the maximum of the minimums over all subarrays of length i
// https://codeforces.com/problemset/problem/547/B LC1950 https://leetcode-cn.com/problems/maximum-of-minimum-values-in-all-subarrays/
{
ans := make([]int, n+1)
for i := range ans {
ans[i] = -2e9
}
for i, v := range a {
sz := right[i] - left[i] - 1
if v > ans[sz] {
ans[sz] = v
}
}
for i := n - 1; i > 0; i-- {
if ans[i+1] > ans[i] {
ans[i] = ans[i+1]
}
}
// ans[1:]
}
return left, right
}
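// A minimal usage sketch (hypothetical helper, not part of the original file):
// a[i] is a minimum of the window (left[i], right[i]), exclusive on both ends.
func exampleMonotoneStack() ([]int, []int) {
	// for []int{3, 1, 2, 4} this returns left = [-1 -1 1 2] and right = [1 4 4 4]
	return monotoneStack([]int{3, 1, 2, 4})
}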
// Note: if the input is a permutation of 1~n, there is a simpler way to find the nearest greater/smaller positions on both sides
// Think of it as a doubly linked list (implemented with arrays):
// - turn perm into a doubly linked list and visit perm[i] in **increasing** order of value; the two neighbors of perm[i] are then the elements greater than perm[i]
// - after processing perm[i], delete perm[i] from the list
// To avoid index-out-of-range checks, perm is 0-indexed on input but treated as 1-indexed (0 or n+1 denotes "does not exist")
// https://codeforces.com/contest/1156/problem/E
// https://atcoder.jp/contests/abc140/tasks/abc140_e
func permLR(perm []int) ([]int, []int) {
n := len(perm)
pos := make([]int, n+1)
left := make([]int, n+2)
right := make([]int, n+1)
for i := 1; i <= n; i++ {
pos[perm[i-1]] = i
left[i], right[i] = i-1, i+1
}
right[0] = 1
left[n+1] = n // sentinels (this problem does not need these two lines, but some problems do, e.g. https://codeforces.com/problemset/problem/1154/E)
del := func(i int) {
l, r := left[i], right[i]
right[l] = r
left[r] = l
}
// Visiting in increasing order of value yields the nearest greater positions on both sides
// Visiting in decreasing order of value yields the nearest smaller positions on both sides
for v := 1; v <= n; v++ {
i := pos[v]
l, r := left[i], right[i]
// do ...
_, _ = l, r
del(i) // remove v from the linked list
}
return left, right
}
// Largest all-ones rectangle
// LC85 https://leetcode-cn.com/problems/maximal-rectangle/
func maximalRectangleArea(mat [][]int) (ans int) {
const target = 1
n, m := len(mat), len(mat[0])
heights := make([][]int, n) // heights[i][j] is the height looking up from (i,j), i.e. the length of the run of consecutive 1s; it is 0 when mat[i][j] = 0
for i, row := range mat {
heights[i] = make([]int, m)
for j, v := range row {
if v == target {
if i == 0 {
heights[i][j] = 1
} else {
heights[i][j] = heights[i-1][j] + 1
}
}
}
}
// then, for each row, the problem reduces to LC84
type pair struct{ h, i int }
for _, hs := range heights {
left := make([]int, m)
stack := []pair{{-1, -1}}
for j, h := range hs {
for {
if top := stack[len(stack)-1]; top.h < h {
left[j] = top.i
break
}
stack = stack[:len(stack)-1]
}
stack = append(stack, pair{h, j})
}
right := make([]int, m)
stack = []pair{{-1, m}}
for j := m - 1; j >= 0; j-- {
h := hs[j]
for {
if top := stack[len(stack)-1]; top.h < h {
right[j] = top.i
break
}
stack = stack[:len(stack)-1]
}
stack = append(stack, pair{h, j})
}
for j, h := range hs {
if area := (right[j] - left[j] - 1) * h; area > ans {
ans = area
}
}
}
return
}
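// A minimal usage sketch (hypothetical helper, not part of the original file):
func exampleMaximalRectangle() int {
	// the bottom row {1, 1, 1} is the largest all-ones rectangle, so this returns 3
	return maximalRectangleArea([][]int{{1, 0, 1}, {1, 1, 1}})
}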
// Number of all-ones submatrices
// LC1504 https://leetcode-cn.com/problems/count-submatrices-with-all-ones/
// See https://leetcode.com/problems/count-submatrices-with-all-ones/discuss/720265/Java-Detailed-Explanation-From-O(MNM)-to-O(MN)-by-using-Stack
func numSubmat(mat [][]int) (ans int) {
m := len(mat[0])
heights := make([]int, m)
for _, row := range mat {
sum := make([]int, m)
type pair struct{ h, j int }
stack := []pair{{-1, -1}}
for j, v := range row {
if v == 0 {
heights[j] = 0
} else {
heights[j]++
}
h := heights[j]
for {
if top := stack[len(stack)-1]; top.h < h {
if pre := top.j; pre < 0 {
sum[j] = (j + 1) * h
} else {
sum[j] = sum[pre] + (j-pre)*h
}
ans += sum[j]
break
}
stack = stack[:len(stack)-1]
}
stack = append(stack, pair{h, j})
}
}
return
}
// Lexicographically smallest subsequence containing all distinct characters of the original string, with no repeated characters
// LC316 https://leetcode.cn/problems/remove-duplicate-letters/
// https://atcoder.jp/contests/abc299/tasks/abc299_g
// EXTRA: 重复个数不超过 limit https://leetcode.cn/contest/tianchi2022/problems/ev2bru/
func removeDuplicateLetters(s string) string {
left := ['z' + 1]int{}
for _, c := range s {
left[c]++
}
st := []rune{}
inSt := ['z' + 1]bool{}
for _, c := range s {
left[c]--
if inSt[c] {
continue
}
for len(st) > 0 && c < st[len(st)-1] && left[st[len(st)-1]] > 0 {
top := st[len(st)-1]
st = st[:len(st)-1]
inSt[top] = false // top > c, and top occurs again later, so it can be pushed back in later
}
st = append(st, c)
inSt[c] = true
}
return string(st)
}
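// A minimal usage sketch (hypothetical helper, not part of the original file):
func exampleRemoveDuplicateLetters() bool {
	// in "bcabc", b and c occur again later, so both are popped when a arrives
	return removeDuplicateLetters("bcabc") == "abc" // true
}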
// Find the longest subarray of a whose element sum is greater than lowerSum
// Returns the left and right endpoints (closed interval) of any one such longest subarray
// Returns [-1,-1] if no subarray qualifies
// Explanation: https://leetcode.cn/problems/longest-well-performing-interval/solution/liang-chong-zuo-fa-liang-zhang-tu-miao-d-hysl/
// LC962 https://leetcode.cn/problems/maximum-width-ramp/
// LC1124 https://leetcode.cn/problems/longest-well-performing-interval/
// Somewhat related: http://codeforces.com/problemset/problem/1788/E
func longestSubarrayWithLowerSum(a []int, lowerSum int) (int, int) {
n := len(a)
sum := make([]int, n+1)
st := []int{0}
for j, v := range a {
j++
sum[j] = sum[j-1] + v
if sum[j] < sum[st[len(st)-1]] {
st = append(st, j)
}
}
l, r := -1, 0
for i := n; i > 0; i-- {
for len(st) > 0 && sum[i]-sum[st[len(st)-1]] > lowerSum {
j := st[len(st)-1]
st = st[:len(st)-1]
if l < 0 || i-j > r-l {
l, r = j, i
}
}
}
r-- // closed interval
return l, r
}
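// A minimal usage sketch (hypothetical helper, not part of the original file):
func exampleLongestSubarray() (int, int) {
	// the whole array sums to 4 > 3, so this returns (0, 2)
	return longestSubarrayWithLowerSum([]int{2, -1, 3}, 3)
}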
|
identifier_name
|
||
constants.rs
|
= -76;
// end-of-error-codes
pub const UNQLITE_CONFIG_JX9_ERR_LOG: c_int = 1;
pub const UNQLITE_CONFIG_MAX_PAGE_CACHE: c_int = 2;
pub const UNQLITE_CONFIG_ERR_LOG: c_int = 3;
pub const UNQLITE_CONFIG_KV_ENGINE: c_int = 4;
pub const UNQLITE_CONFIG_DISABLE_AUTO_COMMIT: c_int = 5;
pub const UNQLITE_CONFIG_GET_KV_NAME: c_int = 6;
// UnQLite/Jx9 Virtual Machine Configuration Commands.
//
// The following set of constants are the available configuration verbs that can
// be used by the host-application to configure the Jx9 (Via UnQLite) Virtual machine.
// These constants must be passed as the second argument to the [unqlite_vm_config()]
// interface.
// Each option requires a variable number of arguments.
// The [unqlite_vm_config()] interface will return UNQLITE_OK on success, any other return
// value indicates failure.
// There are many options, but the most important are: UNQLITE_VM_CONFIG_OUTPUT, which installs
// a VM output consumer callback; UNQLITE_VM_CONFIG_HTTP_REQUEST, which parses and registers
// an HTTP request; and UNQLITE_VM_CONFIG_ARGV_ENTRY, which populates the $argv array.
// For a full discussion on the configuration verbs and their expected parameters, please
// refer to this page:
// http://unqlite.org/c_api/unqlite_vm_config.html
//
/// TWO ARGUMENTS: int (*xConsumer)(const void *, unsigned int, void *), void *
pub const UNQLITE_VM_CONFIG_OUTPUT: c_int = 1;
/// ONE ARGUMENT: const char *zIncludePath
pub const UNQLITE_VM_CONFIG_IMPORT_PATH: c_int = 2;
/// NO ARGUMENTS: Report all run-time errors in the VM output
pub const UNQLITE_VM_CONFIG_ERR_REPORT: c_int = 3;
/// ONE ARGUMENT: int nMaxDepth
pub const UNQLITE_VM_CONFIG_RECURSION_DEPTH: c_int = 4;
/// ONE ARGUMENT: unsigned int *pLength
pub const UNQLITE_VM_OUTPUT_LENGTH: c_int = 5;
/// TWO ARGUMENTS: const char *zName, unqlite_value *pValue
pub const UNQLITE_VM_CONFIG_CREATE_VAR: c_int = 6;
/// TWO ARGUMENTS: const char *zRawRequest, int nRequestLength
pub const UNQLITE_VM_CONFIG_HTTP_REQUEST: c_int = 7;
/// THREE ARGUMENTS: const char *zKey, const char *zValue, int nLen
pub const UNQLITE_VM_CONFIG_SERVER_ATTR: c_int = 8;
/// THREE ARGUMENTS: const char *zKey, const char *zValue, int nLen
pub const UNQLITE_VM_CONFIG_ENV_ATTR: c_int = 9;
/// ONE ARGUMENT: unqlite_value **ppValue
pub const UNQLITE_VM_CONFIG_EXEC_VALUE: c_int = 10;
/// ONE ARGUMENT: const unqlite_io_stream *pStream
pub const UNQLITE_VM_CONFIG_IO_STREAM: c_int = 11;
/// ONE ARGUMENT: const char *zValue
pub const UNQLITE_VM_CONFIG_ARGV_ENTRY: c_int = 12;
/// TWO ARGUMENTS: const void **ppOut, unsigned int *pOutputLen
pub const UNQLITE_VM_CONFIG_EXTRACT_OUTPUT: c_int = 13;
// Storage engine configuration commands.
//
// The following set of constants are the available configuration verbs that can
// be used by the host-application to configure the underlying storage engine
// (i.e Hash, B+tree, R+tree).
//
// These constants must be passed as the first argument to [unqlite_kv_config()].
// Each option requires a variable number of arguments.
// The [unqlite_kv_config()] interface will return UNQLITE_OK on success, any other return
// value indicates failure.
// For a full discussion on the configuration verbs and their expected parameters, please
// refer to this page:
// http://unqlite.org/c_api/unqlite_kv_config.html
//
/// ONE ARGUMENT: unsigned int (*xHash)(const void *,unsigned int)
pub const UNQLITE_KV_CONFIG_HASH_FUNC: c_int = 1;
/// ONE ARGUMENT: int (*xCmp)(const void *,const void *,unsigned int)
pub const UNQLITE_KV_CONFIG_CMP_FUNC: c_int = 2;
// Global Library Configuration Commands.
//
// The following set of constants are the available configuration verbs that can
// be used by the host-application to configure the whole library.
// These constants must be passed as the first argument to [unqlite_lib_config()].
//
// Each option requires a variable number of arguments.
// The [unqlite_lib_config()] interface will return UNQLITE_OK on success, any other return
// value indicates failure.
// Notes:
// The default configuration is recommended for most applications and so the call to
// [unqlite_lib_config()] is usually not necessary. It is provided to support rare
// applications with unusual needs.
// The [unqlite_lib_config()] interface is not threadsafe. The application must ensure that
// no other [unqlite_*()] interfaces are invoked by other threads while [unqlite_lib_config()]
// is running. Furthermore, [unqlite_lib_config()] may only be invoked prior to library
// initialization using [unqlite_lib_init()] or [unqlite_init()] or after shutdown
// by [unqlite_lib_shutdown()]. If [unqlite_lib_config()] is called after [unqlite_lib_init()]
// or [unqlite_init()] and before [unqlite_lib_shutdown()] then it will return UNQLITE_LOCKED.
// For a full discussion on the configuration verbs and their expected parameters, please
// refer to this page:
// http://unqlite.org/c_api/unqlite_lib.html
//
/// ONE ARGUMENT: const SyMemMethods *pMemMethods
pub const UNQLITE_LIB_CONFIG_USER_MALLOC: c_int = 1;
/// TWO ARGUMENTS: int (*xMemError)(void *), void *pUserData
pub const UNQLITE_LIB_CONFIG_MEM_ERR_CALLBACK: c_int = 2;
/// ONE ARGUMENT: const SyMutexMethods *pMutexMethods
pub const UNQLITE_LIB_CONFIG_USER_MUTEX: c_int = 3;
/// NO ARGUMENTS
pub const UNQLITE_LIB_CONFIG_THREAD_LEVEL_SINGLE: c_int = 4;
/// NO ARGUMENTS
pub const UNQLITE_LIB_CONFIG_THREAD_LEVEL_MULTI: c_int = 5;
/// ONE ARGUMENT: const unqlite_vfs *pVfs
pub const UNQLITE_LIB_CONFIG_VFS: c_int = 6;
/// ONE ARGUMENT: unqlite_kv_methods *pStorage
pub const UNQLITE_LIB_CONFIG_STORAGE_ENGINE: c_int = 7;
/// ONE ARGUMENT: int iPageSize
pub const UNQLITE_LIB_CONFIG_PAGE_SIZE: c_int = 8;
// These bit values are intended for use in the 3rd parameter to the [unqlite_open()] interface
// and in the 4th parameter to the xOpen method of the [unqlite_vfs] object.
//
/// Read only mode. Ok for [unqlite_open]
pub const UNQLITE_OPEN_READONLY: c_uint = 0x00000001;
/// Ok for [unqlite_open]
pub const UNQLITE_OPEN_READWRITE: c_uint = 0x00000002;
/// Ok for [unqlite_open]
pub const UNQLITE_OPEN_CREATE: c_uint = 0x00000004;
/// VFS only
pub const UNQLITE_OPEN_EXCLUSIVE: c_uint = 0x00000008;
/// VFS only
pub const UNQLITE_OPEN_TEMP_DB: c_uint = 0x00000010;
/// Ok for [unqlite_open]
pub const UNQLITE_OPEN_NOMUTEX: c_uint = 0x00000020;
/// Omit journaling for this database. Ok for [unqlite_open]
pub const UNQLITE_OPEN_OMIT_JOURNALING: c_uint = 0x00000040;
/// An in memory database. Ok for [unqlite_open]
pub const UNQLITE_OPEN_IN_MEMORY: c_uint = 0x00000080;
/// Obtain a memory view of the whole file. Ok for [unqlite_open]
pub const UNQLITE_OPEN_MMAP: c_uint = 0x00000100;
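// A minimal sketch (hypothetical, not part of the original bindings) showing
// how the open-mode bit flags compose; the combined value is what a caller
// would pass as the mode argument of unqlite_open().
#[allow(dead_code)]
const OPEN_CREATE_READWRITE: c_uint = UNQLITE_OPEN_CREATE | UNQLITE_OPEN_READWRITE;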
// Synchronization Type Flags
//
// When UnQLite invokes the xSync() method of an [unqlite_io_methods] object it uses
// a combination of these integer values as the second argument.
//
// When the UNQLITE_SYNC_DATAONLY flag is used, it means that the sync operation only
// needs to flush data to mass storage. Inode information need not be flushed.
// If the lower four bits of the flag equal UNQLITE_SYNC_NORMAL, that means to use normal
// fsync() semantics. If the lower four bits equal UNQLITE_SYNC_FULL, that means to use
// Mac OS X style fullsync instead of fsync().
//
|
random_line_split
|
||
segment.pb.go
|
// road classes are based on OpenStreetMap usage of the "highway" tag.
// each value of the enumeration corresponds to one value of the tag,
// except for ClassServiceOther, which is used for service and other roads.
type Segment_RoadClass int32
const (
Segment_ClassMotorway Segment_RoadClass = 0
Segment_ClassTrunk Segment_RoadClass = 1
Segment_ClassPrimary Segment_RoadClass = 2
Segment_ClassSecondary Segment_RoadClass = 3
Segment_ClassTertiary Segment_RoadClass = 4
Segment_ClassUnclassified Segment_RoadClass = 5
Segment_ClassResidential Segment_RoadClass = 6
Segment_ClassServiceOther Segment_RoadClass = 7
)
var Segment_RoadClass_name = map[int32]string{
0: "ClassMotorway",
1: "ClassTrunk",
2: "ClassPrimary",
3: "ClassSecondary",
4: "ClassTertiary",
5: "ClassUnclassified",
6: "ClassResidential",
7: "ClassServiceOther",
}
var Segment_RoadClass_value = map[string]int32{
"ClassMotorway": 0,
"ClassTrunk": 1,
"ClassPrimary": 2,
"ClassSecondary": 3,
"ClassTertiary": 4,
"ClassUnclassified": 5,
"ClassResidential": 6,
"ClassServiceOther": 7,
}
func (x Segment_RoadClass) Enum() *Segment_RoadClass {
p := new(Segment_RoadClass)
*p = x
return p
}
func (x Segment_RoadClass) String() string {
return proto.EnumName(Segment_RoadClass_name, int32(x))
}
func (x *Segment_RoadClass) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(Segment_RoadClass_value, data, "Segment_RoadClass")
if err != nil {
return err
}
*x = Segment_RoadClass(value)
return nil
}
func (Segment_RoadClass) EnumDescriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 0} }
// form of way describes the physical attributes of the road.
type Segment_FormOfWay int32
const (
// use FowUndefined if you do not know what physical attributes the road
// has.
Segment_FowUndefined Segment_FormOfWay = 0
// use FowMotorway for motorways.
Segment_FowMotorway Segment_FormOfWay = 1
// use FowMultipleCarriageway for multiple carriageway roads. that is, when
// there are separate OSM ways for each direction of travel.
Segment_FowMultipleCarriageway Segment_FormOfWay = 2
// use FowSingleCarriageway for other roads.
Segment_FowSingleCarriageway Segment_FormOfWay = 3
// use FowRoundabout for roundabouts
Segment_FowRoundabout Segment_FormOfWay = 4
// use FowTrafficSquare for roads which enclose an area, but which are not
// roundabouts
Segment_FowTrafficSquare Segment_FormOfWay = 5
// use FowSlipRoad for slip roads, ramps and other links.
Segment_FowSlipRoad Segment_FormOfWay = 6
// use FowOther for roads which do not match any of the above definitions,
// but for which the form of way is known.
Segment_FowOther Segment_FormOfWay = 7
)
var Segment_FormOfWay_name = map[int32]string{
0: "FowUndefined",
1: "FowMotorway",
2: "FowMultipleCarriageway",
3: "FowSingleCarriageway",
4: "FowRoundabout",
5: "FowTrafficSquare",
6: "FowSlipRoad",
7: "FowOther",
}
var Segment_FormOfWay_value = map[string]int32{
"FowUndefined": 0,
"FowMotorway": 1,
"FowMultipleCarriageway": 2,
"FowSingleCarriageway": 3,
"FowRoundabout": 4,
"FowTrafficSquare": 5,
"FowSlipRoad": 6,
"FowOther": 7,
}
func (x Segment_FormOfWay) Enum() *Segment_FormOfWay {
p := new(Segment_FormOfWay)
*p = x
return p
}
func (x Segment_FormOfWay) String() string {
return proto.EnumName(Segment_FormOfWay_name, int32(x))
}
func (x *Segment_FormOfWay) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(Segment_FormOfWay_value, data, "Segment_FormOfWay")
if err != nil {
return err
}
*x = Segment_FormOfWay(value)
return nil
}
func (Segment_FormOfWay) EnumDescriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 1} }
type Segment struct {
// a segment is a list of at least two LocationReferences.
//
// all but the last LocationReference must contain a full set of data for
// each field, but the final one should consist of only a reference
// coordinate. any other information on the final LocationReference may be
// ignored.
//
// a segment with only a single LocationReference is invalid and may be
// ignored.
Lrps []*Segment_LocationReference `protobuf:"bytes,1,rep,name=lrps" json:"lrps,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment) Reset() { *m = Segment{} }
func (m *Segment) String() string { return proto.CompactTextString(m) }
func (*Segment) ProtoMessage() {}
func (*Segment) Descriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0} }
func (m *Segment) GetLrps() []*Segment_LocationReference {
if m != nil {
return m.Lrps
}
return nil
}
type Segment_LatLng struct {
// lat & lng in EPSG:4326 multiplied by 10^7 and rounded to the nearest
// integer. this gives a precision of about 1.1cm (7/16ths of an inch)
// worst case at the equator.
Lat *int32 `protobuf:"fixed32,1,opt,name=lat" json:"lat,omitempty"`
Lng *int32 `protobuf:"fixed32,2,opt,name=lng" json:"lng,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment_LatLng) Reset() { *m = Segment_LatLng{} }
func (m *Segment_LatLng) String() string { return proto.CompactTextString(m) }
func (*Segment_LatLng) ProtoMessage() {}
func (*Segment_LatLng) Descriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 0} }
func (m *Segment_LatLng) GetLat() int32 {
if m != nil && m.Lat != nil {
return *m.Lat
}
return 0
}
func (m *Segment_LatLng) GetLng() int32 {
if m != nil && m.Lng != nil {
return *m.Lng
}
return 0
}
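// a minimal sketch of the fixed-point encoding documented on Segment_LatLng:
// degrees scaled by 10^7 and rounded to the nearest integer. assumes "math"
// is imported; the helpers are illustrative, not part of the generated file.
func toFixedE7(degrees float64) int32 {
return int32(math.Round(degrees * 1e7))
}
func fromFixedE7(fixed int32) float64 {
return float64(fixed) / 1e7
}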
// a segment consists of multiple LocationReferences, each of which describes
// the road at a particular reference coordinate, or properties of the road
// between the current LocationReference and the next.
//
// the first and last LocationReference reference coordinates will usually be at
// "true" intersections, which are intersections where there are multiple
// paths through the intersection. This excludes "false" intersections where
// two roads cross, but there are not multiple paths, such as overpasses,
// bridges, changes of road name or properties.
//
// ___
// | / \ _4---5--
// -1-----2 \ __/ |
// | 3__/ |
//
// in the example above, the Segment consists of 5 LocationReferences,
// numbered 1-5. locations 1 & 5 are at true intersections with other roads
// and 2, 3 & 4 are intermediate LocationReferences inserted due to the length
// of the road.
//
// Occasionally, a LocationReference can be inserted along a road (i.e., not at
// a true intersection) to break long road segments into multiple OSMLR
// segments.
//
type Segment_LocationReference struct {
// the reference coordinate.
Coord *Segment_LatLng `protobuf:"bytes,1,opt,name=coord" json:"coord,omitempty"`
// bearing in degrees clockwise from true north between 0 and 359 - will
// generally
|
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
random_line_split
|
|
segment.pb.go
|
) String() string {
return proto.EnumName(Segment_FormOfWay_name, int32(x))
}
func (x *Segment_FormOfWay) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(Segment_FormOfWay_value, data, "Segment_FormOfWay")
if err != nil {
return err
}
*x = Segment_FormOfWay(value)
return nil
}
func (Segment_FormOfWay) EnumDescriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 1} }
type Segment struct {
// a segment is a list of at least two LocationReferences.
//
// all but the last LocationReference must contain a full set of data for
// each field, but the final one should consist of only a reference
// coordinate. any other information on the final LocationReference may be
// ignored.
//
// a segment with only a single LocationReference is invalid and may be
// ignored.
Lrps []*Segment_LocationReference `protobuf:"bytes,1,rep,name=lrps" json:"lrps,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment) Reset() { *m = Segment{} }
func (m *Segment) String() string { return proto.CompactTextString(m) }
func (*Segment) ProtoMessage()
|
func (*Segment) Descriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0} }
func (m *Segment) GetLrps() []*Segment_LocationReference {
if m != nil {
return m.Lrps
}
return nil
}
type Segment_LatLng struct {
// lat & lng in EPSG:4326 multiplied by 10^7 and rounded to the nearest
// integer. this gives a precision of about 1.1cm (7/16ths of an inch)
// worst case at the equator.
Lat *int32 `protobuf:"fixed32,1,opt,name=lat" json:"lat,omitempty"`
Lng *int32 `protobuf:"fixed32,2,opt,name=lng" json:"lng,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment_LatLng) Reset() { *m = Segment_LatLng{} }
func (m *Segment_LatLng) String() string { return proto.CompactTextString(m) }
func (*Segment_LatLng) ProtoMessage() {}
func (*Segment_LatLng) Descriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 0} }
func (m *Segment_LatLng) GetLat() int32 {
if m != nil && m.Lat != nil {
return *m.Lat
}
return 0
}
func (m *Segment_LatLng) GetLng() int32 {
if m != nil && m.Lng != nil {
return *m.Lng
}
return 0
}
// a segment consists of multiple LocationReferences, each of which describes
// the road at a particular reference coordinate, or properties of the road
// between the current LocationReference and the next.
//
// the first and last LocationReference reference coordinates will usually be at
// "true" intersections, which are intersections where there are multiple
// paths through the intersection. This excludes "false" intersections where
// two roads cross, but there are not multiple paths, such as overpasses,
// bridges, changes of road name or properties.
//
// ___
// | / \ _4---5--
// -1-----2 \ __/ |
// | 3__/ |
//
// in the example above, the Segment consists of 5 LocationReferences,
// numbered 1-5. locations 1 & 5 are at true intersections with other roads
// and 2, 3 & 4 are intermediate LocationReferences inserted due to the length
// of the road.
//
// Occasionally, a LocationReference can be inserted along a road (i.e., not at
// a true intersection) to break long road segments into multiple OSMLR
// segments.
//
type Segment_LocationReference struct {
// the reference coordinate.
Coord *Segment_LatLng `protobuf:"bytes,1,opt,name=coord" json:"coord,omitempty"`
// bearing in degrees clockwise from true north between 0 and 359 - will
// generally fit in a couple of bytes varint.
//
// the bearing should be calculated toward a point 20m along the road from
// the reference coordinate towards the next LocationReference. if this is
// the final LocationReference, then omit the bearing.
//
// each LocationReference, of which there may be several in this Segment,
// except for the last must have a bearing calculated from the reference
// coordinate of this LocationReference.
Bear *uint32 `protobuf:"varint,2,opt,name=bear" json:"bear,omitempty"`
// road class at the reference coordinate.
StartFrc *Segment_RoadClass `protobuf:"varint,3,opt,name=start_frc,json=startFrc,enum=opentraffic.osmlr.Segment_RoadClass" json:"start_frc,omitempty"`
// form of way at the reference coordinate.
StartFow *Segment_FormOfWay `protobuf:"varint,4,opt,name=start_fow,json=startFow,enum=opentraffic.osmlr.Segment_FormOfWay" json:"start_fow,omitempty"`
// lowest road class (most important road) between the start coordinate
// and the next LocationReference.
LeastFrc *Segment_RoadClass `protobuf:"varint,5,opt,name=least_frc,json=leastFrc,enum=opentraffic.osmlr.Segment_RoadClass" json:"least_frc,omitempty"`
// length in meters, rounded to the nearest meter. the maximum allowed
// length is 15km, but most segments will be much shorter, so a varint
// representation makes sense.
//
// if the length between successive LocationReferences is more than 15km
// then you MUST insert an intermediate LocationReference.
Length *uint32 `protobuf:"varint,6,opt,name=length" json:"length,omitempty"`
// Is this LRP at a node/intersection (true) or along a road (false)?
// This hint can be useful when associating OSMLR with routing graphs
AtNode *bool `protobuf:"varint,7,opt,name=at_node,json=atNode" json:"at_node,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment_LocationReference) Reset() { *m = Segment_LocationReference{} }
func (m *Segment_LocationReference) String() string { return proto.CompactTextString(m) }
func (*Segment_LocationReference) ProtoMessage() {}
func (*Segment_LocationReference) Descriptor() ([]byte, []int) {
return fileDescriptorSegment, []int{0, 1}
}
func (m *Segment_LocationReference) GetCoord() *Segment_LatLng {
if m != nil {
return m.Coord
}
return nil
}
func (m *Segment_LocationReference) GetBear() uint32 {
if m != nil && m.Bear != nil {
return *m.Bear
}
return 0
}
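// a minimal sketch of the bearing convention documented on the Bear field:
// degrees clockwise from true north in [0, 360). this computes the initial
// great-circle bearing between two reference coordinates; per the comment
// above, the target should really be a point 20m along the road. assumes
// "math" is imported; the helper is illustrative, not part of this file.
func initialBearing(a, b *Segment_LatLng) uint32 {
const toRad = math.Pi / 180
lat1 := float64(a.GetLat()) / 1e7 * toRad
lat2 := float64(b.GetLat()) / 1e7 * toRad
dLng := (float64(b.GetLng()) - float64(a.GetLng())) / 1e7 * toRad
y := math.Sin(dLng) * math.Cos(lat2)
x := math.Cos(lat1)*math.Sin(lat2) - math.Sin(lat1)*math.Cos(lat2)*math.Cos(dLng)
deg := math.Atan2(y, x) / toRad
return uint32(math.Mod(deg+360, 360))
}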
func (m *Segment_LocationReference) GetStartFrc() Segment_RoadClass {
if m != nil && m.StartFrc != nil {
return *m.StartFrc
}
return Segment_ClassMotorway
}
func (m *Segment_LocationReference) GetStartFow() Segment_FormOfWay {
if m != nil && m.StartFow != nil {
return *m.StartFow
}
return Segment_FowUndefined
}
func (m *Segment_LocationReference) GetLeastFrc() Segment_RoadClass {
if m != nil && m.LeastFrc != nil {
return *m.LeastFrc
}
return Segment_ClassMotorway
}
func (m *Segment_LocationReference) GetLength() uint32 {
if m != nil && m.Length != nil {
return *m.Length
}
return 0
}
func (m *Segment_LocationReference) GetAtNode() bool {
if m != nil && m.AtNode != nil {
return *m.AtNode
}
return false
}
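// a minimal validity sketch for the invariants documented above: at least two
// LocationReferences per segment, and at most 15km (15000m, per the Length
// field's comment) between successive ones. illustrative only, not part of
// the generated file.
func validSegment(s *Segment) bool {
lrps := s.GetLrps()
if len(lrps) < 2 {
return false // a segment with a single LocationReference is invalid
}
for _, lrp := range lrps[:len(lrps)-1] {
if lrp.GetLength() > 15000 {
return false // an intermediate LocationReference MUST be inserted
}
}
return true
}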
func init() {
proto.RegisterType((*Segment)(nil), "opentraffic.osmlr.Segment")
proto.RegisterType((*Segment_LatLng)(nil), "opentraffic.osmlr.Segment.LatLng")
proto.RegisterType((*Segment_LocationReference)(nil), "opentraffic.osmlr.Segment.LocationReference")
proto.RegisterEnum("opentraffic.osmlr.Segment_RoadClass", Segment_RoadClass_name, Segment_RoadClass_value)
proto.RegisterEnum("opentraffic.osmlr.Segment_FormOfWay", Segment_FormOfWay_name, Segment_FormOfWay_value)
}
func init() { proto.RegisterFile("segment.proto", fileDescriptorSegment) }
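// a minimal construction-and-serialization sketch using the generated types
// and the proto package this file already imports. the coordinate and field
// values are illustrative; proto.Uint32 and proto.Marshal are the standard
// protobuf helpers.
func buildExampleSegment() ([]byte, error) {
lat1, lng1 := int32(377749000), int32(-1224194000) // 37.7749, -122.4194 in E7
lat2, lng2 := int32(377758000), int32(-1224183000)
seg := &Segment{
Lrps: []*Segment_LocationReference{
{
Coord: &Segment_LatLng{Lat: &lat1, Lng: &lng1},
Bear: proto.Uint32(90),
Length: proto.Uint32(120),
},
{
// the final LocationReference carries only a reference coordinate.
Coord: &Segment_LatLng{Lat: &lat2, Lng: &lng2},
},
},
}
return proto.Marshal(seg)
}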
var fileDescriptorSegment = []byte{
// 529 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xcb, 0x
|
{}
|
identifier_body
|
segment.pb.go
|
) String() string {
return proto.EnumName(Segment_FormOfWay_name, int32(x))
}
func (x *Segment_FormOfWay) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(Segment_FormOfWay_value, data, "Segment_FormOfWay")
if err != nil {
return err
}
*x = Segment_FormOfWay(value)
return nil
}
func (Segment_FormOfWay) EnumDescriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 1} }
type Segment struct {
// a segment is a list of at least two LocationReferences.
//
// all but the last LocationReference must contain a full set of data for
// each field, but the final one should consist of only a reference
// coordinate. any other information on the final LocationReference may be
// ignored.
//
// a segment with only a single LocationReference is invalid and may be
// ignored.
Lrps []*Segment_LocationReference `protobuf:"bytes,1,rep,name=lrps" json:"lrps,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment) Reset() { *m = Segment{} }
func (m *Segment) String() string { return proto.CompactTextString(m) }
func (*Segment) ProtoMessage() {}
func (*Segment)
|
() ([]byte, []int) { return fileDescriptorSegment, []int{0} }
func (m *Segment) GetLrps() []*Segment_LocationReference {
if m != nil {
return m.Lrps
}
return nil
}
type Segment_LatLng struct {
// lat & lng in EPSG:4326 multiplied by 10^7 and rounded to the nearest
// integer. this gives a precision of about 1.1cm (7/16ths of an inch)
// worst case at the equator.
Lat *int32 `protobuf:"fixed32,1,opt,name=lat" json:"lat,omitempty"`
Lng *int32 `protobuf:"fixed32,2,opt,name=lng" json:"lng,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment_LatLng) Reset() { *m = Segment_LatLng{} }
func (m *Segment_LatLng) String() string { return proto.CompactTextString(m) }
func (*Segment_LatLng) ProtoMessage() {}
func (*Segment_LatLng) Descriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 0} }
func (m *Segment_LatLng) GetLat() int32 {
if m != nil && m.Lat != nil {
return *m.Lat
}
return 0
}
func (m *Segment_LatLng) GetLng() int32 {
if m != nil && m.Lng != nil {
return *m.Lng
}
return 0
}
// a segment consists of multiple LocationReferences, each of which describes
// the road at a particular reference coordinate, or properties of the road
// between the current LocationReference and the next.
//
// the first and last LocationReference reference coordinates will usually be at
// "true" intersections, which are intersections where there are multiple
// paths through the intersection. This excludes "false" intersections where
// two roads cross, but there are not multiple paths, such as overpasses,
// bridges, changes of road name or properties.
//
// ___
// | / \ _4---5--
// -1-----2 \ __/ |
// | 3__/ |
//
// in the example above, the Segment consists of 5 LocationReferences,
// numbered 1-5. locations 1 & 5 are at true intersections with other roads
// and 2, 3 & 4 are intermediate LocationReferences inserted due to the length
// of the road.
//
// Occasionally, a LocationReference can be inserted along a road (i.e., not at
// a true intersection) to break long road segments into multiple OSMLR
// segments.
//
type Segment_LocationReference struct {
// the reference coordinate.
Coord *Segment_LatLng `protobuf:"bytes,1,opt,name=coord" json:"coord,omitempty"`
// bearing in degrees clockwise from true north between 0 and 359 - will
// generally fit in a couple of bytes varint.
//
// the bearing should be calculated toward a point 20m along the road from
// the reference coordinate towards the next LocationReference. if this is
// the final LocationReference, then omit the bearing.
//
// each LocationReference, of which there may be several in this Segment,
// except for the last must have a bearing calculated from the reference
// coordinate of this LocationReference.
Bear *uint32 `protobuf:"varint,2,opt,name=bear" json:"bear,omitempty"`
// road class at the reference coordinate.
StartFrc *Segment_RoadClass `protobuf:"varint,3,opt,name=start_frc,json=startFrc,enum=opentraffic.osmlr.Segment_RoadClass" json:"start_frc,omitempty"`
// form of way at the reference coordinate.
StartFow *Segment_FormOfWay `protobuf:"varint,4,opt,name=start_fow,json=startFow,enum=opentraffic.osmlr.Segment_FormOfWay" json:"start_fow,omitempty"`
// lowest road class (most important road) between the start coordinate
// and the next LocationReference.
LeastFrc *Segment_RoadClass `protobuf:"varint,5,opt,name=least_frc,json=leastFrc,enum=opentraffic.osmlr.Segment_RoadClass" json:"least_frc,omitempty"`
// length in meters, rounded to the nearest meter. the maximum allowed
// length is 15km, but most segments will be much shorter, so a varint
// representation makes sense.
//
// if the length between successive LocationReferences is more than 15km
// then you MUST insert an intermediate LocationReference.
Length *uint32 `protobuf:"varint,6,opt,name=length" json:"length,omitempty"`
// Is this LRP at a node/intersection (true) or along a road (false)?
// This hint can be useful when associating OSMLR with routing graphs
AtNode *bool `protobuf:"varint,7,opt,name=at_node,json=atNode" json:"at_node,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment_LocationReference) Reset() { *m = Segment_LocationReference{} }
func (m *Segment_LocationReference) String() string { return proto.CompactTextString(m) }
func (*Segment_LocationReference) ProtoMessage() {}
func (*Segment_LocationReference) Descriptor() ([]byte, []int) {
return fileDescriptorSegment, []int{0, 1}
}
func (m *Segment_LocationReference) GetCoord() *Segment_LatLng {
if m != nil {
return m.Coord
}
return nil
}
func (m *Segment_LocationReference) GetBear() uint32 {
if m != nil && m.Bear != nil {
return *m.Bear
}
return 0
}
func (m *Segment_LocationReference) GetStartFrc() Segment_RoadClass {
if m != nil && m.StartFrc != nil {
return *m.StartFrc
}
return Segment_ClassMotorway
}
func (m *Segment_LocationReference) GetStartFow() Segment_FormOfWay {
if m != nil && m.StartFow != nil {
return *m.StartFow
}
return Segment_FowUndefined
}
func (m *Segment_LocationReference) GetLeastFrc() Segment_RoadClass {
if m != nil && m.LeastFrc != nil {
return *m.LeastFrc
}
return Segment_ClassMotorway
}
func (m *Segment_LocationReference) GetLength() uint32 {
if m != nil && m.Length != nil {
return *m.Length
}
return 0
}
func (m *Segment_LocationReference) GetAtNode() bool {
if m != nil && m.AtNode != nil {
return *m.AtNode
}
return false
}
func init() {
proto.RegisterType((*Segment)(nil), "opentraffic.osmlr.Segment")
proto.RegisterType((*Segment_LatLng)(nil), "opentraffic.osmlr.Segment.LatLng")
proto.RegisterType((*Segment_LocationReference)(nil), "opentraffic.osmlr.Segment.LocationReference")
proto.RegisterEnum("opentraffic.osmlr.Segment_RoadClass", Segment_RoadClass_name, Segment_RoadClass_value)
proto.RegisterEnum("opentraffic.osmlr.Segment_FormOfWay", Segment_FormOfWay_name, Segment_FormOfWay_value)
}
func init() { proto.RegisterFile("segment.proto", fileDescriptorSegment) }
var fileDescriptorSegment = []byte{
// 529 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xcb, 0x
|
Descriptor
|
identifier_name
|
segment.pb.go
|
) String() string {
return proto.EnumName(Segment_FormOfWay_name, int32(x))
}
func (x *Segment_FormOfWay) UnmarshalJSON(data []byte) error {
value, err := proto.UnmarshalJSONEnum(Segment_FormOfWay_value, data, "Segment_FormOfWay")
if err != nil {
return err
}
*x = Segment_FormOfWay(value)
return nil
}
func (Segment_FormOfWay) EnumDescriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 1} }
type Segment struct {
// a segment is a list of at least two LocationReferences.
//
// all but the last LocationReference must contain a full set of data for
// each field, but the final one should consist of only a reference
// coordinate. any other information on the final LocationReference may be
// ignored.
//
// a segment with only a single LocationReference is invalid and may be
// ignored.
Lrps []*Segment_LocationReference `protobuf:"bytes,1,rep,name=lrps" json:"lrps,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment) Reset() { *m = Segment{} }
func (m *Segment) String() string { return proto.CompactTextString(m) }
func (*Segment) ProtoMessage() {}
func (*Segment) Descriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0} }
func (m *Segment) GetLrps() []*Segment_LocationReference {
if m != nil
|
return nil
}
type Segment_LatLng struct {
// lat & lng in EPSG:4326 multiplied by 10^7 and rounded to the nearest
// integer. this gives a precision of about 1.1cm (7/16ths of an inch)
// worst case at the equator.
Lat *int32 `protobuf:"fixed32,1,opt,name=lat" json:"lat,omitempty"`
Lng *int32 `protobuf:"fixed32,2,opt,name=lng" json:"lng,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment_LatLng) Reset() { *m = Segment_LatLng{} }
func (m *Segment_LatLng) String() string { return proto.CompactTextString(m) }
func (*Segment_LatLng) ProtoMessage() {}
func (*Segment_LatLng) Descriptor() ([]byte, []int) { return fileDescriptorSegment, []int{0, 0} }
func (m *Segment_LatLng) GetLat() int32 {
if m != nil && m.Lat != nil {
return *m.Lat
}
return 0
}
func (m *Segment_LatLng) GetLng() int32 {
if m != nil && m.Lng != nil {
return *m.Lng
}
return 0
}
// a segment consists of multiple LocationReferences, each of which describes
// the road at a particular reference coordinate, or properties of the road
// between the current LocationReference and the next.
//
// the first and last LocationReference reference coordinates will usually be at
// "true" intersections, which are intersections where there are multiple
// paths through the intersection. This excludes "false" intersections where
// two roads cross, but there are not multiple paths, such as overpasses,
// bridges, changes of road name or properties.
//
// ___
// | / \ _4---5--
// -1-----2 \ __/ |
// | 3__/ |
//
// in the example above, the Segment consists of 5 LocationReferences,
// numbered 1-5. locations 1 & 5 are at true intersections with other roads
// and 2, 3 & 4 are intermediate LocationReferences inserted due to the length
// of the road.
//
// Occasionally, a LocationReference can be inserted along a road (i.e., not at
// a true intersection) to break long road segments into multiple OSMLR
// segments.
//
type Segment_LocationReference struct {
// the reference coordinate.
Coord *Segment_LatLng `protobuf:"bytes,1,opt,name=coord" json:"coord,omitempty"`
// bearing in degrees clockwise from true north between 0 and 359 - will
// generally fit in a couple of bytes varint.
//
// the bearing should be calculated toward a point 20m along the road from
// the reference coordinate towards the next LocationReference. if this is
// the final LocationReference, then omit the bearing.
//
// each LocationReference, of which there may be several in this Segment,
// except for the last must have a bearing calculated from the reference
// coordinate of this LocationReference.
Bear *uint32 `protobuf:"varint,2,opt,name=bear" json:"bear,omitempty"`
// road class at the reference coordinate.
StartFrc *Segment_RoadClass `protobuf:"varint,3,opt,name=start_frc,json=startFrc,enum=opentraffic.osmlr.Segment_RoadClass" json:"start_frc,omitempty"`
// form of way at the reference coordinate.
StartFow *Segment_FormOfWay `protobuf:"varint,4,opt,name=start_fow,json=startFow,enum=opentraffic.osmlr.Segment_FormOfWay" json:"start_fow,omitempty"`
// lowest road class (most important road) between the start coordinate
// and the next LocationReference.
LeastFrc *Segment_RoadClass `protobuf:"varint,5,opt,name=least_frc,json=leastFrc,enum=opentraffic.osmlr.Segment_RoadClass" json:"least_frc,omitempty"`
// length in meters, rounded to the nearest meter. the maximum allowed
// length is 15km, but most segments will be much shorter, so a varint
// representation makes sense.
//
// if the length between successive LocationReferences is more than 15km
// then you MUST insert an intermediate LocationReference.
Length *uint32 `protobuf:"varint,6,opt,name=length" json:"length,omitempty"`
// Is this LRP at a node/intersection (true) or along a road (false)?
// This hint can be useful when associating OSMLR with routing graphs
AtNode *bool `protobuf:"varint,7,opt,name=at_node,json=atNode" json:"at_node,omitempty"`
XXX_unrecognized []byte `json:"-"`
}
func (m *Segment_LocationReference) Reset() { *m = Segment_LocationReference{} }
func (m *Segment_LocationReference) String() string { return proto.CompactTextString(m) }
func (*Segment_LocationReference) ProtoMessage() {}
func (*Segment_LocationReference) Descriptor() ([]byte, []int) {
return fileDescriptorSegment, []int{0, 1}
}
func (m *Segment_LocationReference) GetCoord() *Segment_LatLng {
if m != nil {
return m.Coord
}
return nil
}
func (m *Segment_LocationReference) GetBear() uint32 {
if m != nil && m.Bear != nil {
return *m.Bear
}
return 0
}
func (m *Segment_LocationReference) GetStartFrc() Segment_RoadClass {
if m != nil && m.StartFrc != nil {
return *m.StartFrc
}
return Segment_ClassMotorway
}
func (m *Segment_LocationReference) GetStartFow() Segment_FormOfWay {
if m != nil && m.StartFow != nil {
return *m.StartFow
}
return Segment_FowUndefined
}
func (m *Segment_LocationReference) GetLeastFrc() Segment_RoadClass {
if m != nil && m.LeastFrc != nil {
return *m.LeastFrc
}
return Segment_ClassMotorway
}
func (m *Segment_LocationReference) GetLength() uint32 {
if m != nil && m.Length != nil {
return *m.Length
}
return 0
}
func (m *Segment_LocationReference) GetAtNode() bool {
if m != nil && m.AtNode != nil {
return *m.AtNode
}
return false
}
func init() {
proto.RegisterType((*Segment)(nil), "opentraffic.osmlr.Segment")
proto.RegisterType((*Segment_LatLng)(nil), "opentraffic.osmlr.Segment.LatLng")
proto.RegisterType((*Segment_LocationReference)(nil), "opentraffic.osmlr.Segment.LocationReference")
proto.RegisterEnum("opentraffic.osmlr.Segment_RoadClass", Segment_RoadClass_name, Segment_RoadClass_value)
proto.RegisterEnum("opentraffic.osmlr.Segment_FormOfWay", Segment_FormOfWay_name, Segment_FormOfWay_value)
}
func init() { proto.RegisterFile("segment.proto", fileDescriptorSegment) }
var fileDescriptorSegment = []byte{
// 529 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0xcb, 0
|
{
return m.Lrps
}
|
conditional_block
|
table-form.component.ts
|
Length);
c.revStr = `${c.tipInfo} ${this._calTotalFiled[c.field]} ${c.tipInfoType}`;
}
}
// tab header click
tabClick(data: any): void { // deprecated, do not use
this.tabResultFun.emit(data);
setTimeout( () => {
if (this.updateHeader) {
if (!this._columns) {
this.getUserColumns({formId: this.curFormId, userId: this.info.APPINFO.USER.userId, gridId: this.gridId}); // parameters to be finalized
}
}
}, 100);
}
// input blur event
inputBlur(data: any, dex: number){
this.inputBlurFun.emit({data: data, index: dex});
}
// input live change event
onChange(val: string, data: any, n: any, h: any, ind: any){
this.modelChange.emit({val: val, data: data, input: n, header: h, index: ind});
}
// list mouse enter event
listOverFun(data: any, index: number){
this.listOver.emit({'data': data, 'index': index});
}
// list mouse leave event
listLeaveFun(data: any, index: number){
this.listLeave.emit({'data': data, 'index': index});
}
// table row click event
tdClick(data: any, index: number, bol: boolean){
if (!bol) {
return;
}
this._trSelected = [];
this._trSelected[index] = true;
this.listClick.emit(data);
this.allCheckBoxStyle();
this.updateData = [];
if (this.constData && this.constData[0]) {
this.updateData = this.constData.filter((x: any) => x.checked);
}
}
// actions
inpEmitFun(data: any, dex: any, rowid: any, n: any) {
this.inpEmit.emit(
{ inpName: data.inpName || '', inpValue: data.inpValue || '',
selData: data.selData || {}, index: dex, rowid: rowid, eName: n.colEname}
);
}
//
filterChange(bool: Boolean, viewer: any, header: any){
if (!bool) {
viewer.scrollToIndex(0);
const name = header.colEname;
header._checked = [];
this.headerFilter[name].forEach(x => x.checked && header._checked.push(x.text));
this.filterData(header._checked, name);
}
}
// sort
sort(flag: any, index: any) {
// flag: current sort type, one of null, 'descend', 'ascend'
const type = !flag ? 'descend' : flag === 'descend' ? 'ascend' : null;
index._sortType = type;
// if (!type){
// this.allData = [...this.constData];
// return;
// }
this.allData = this.allData.sort((x: any, y: any) => {
let a = x[index.colEname], b = y[index.colEname];
if (index.type === 'number') {
a = Number(a || 0);
b = Number(b || 0);
} else {
a = JSON.stringify(a || null);
b = JSON.stringify(b || null);
if (index.type === 'string') {
return (type === 'ascend') ? a.localeCompare(b, 'zh') : b.localeCompare(a, 'zh');
}
}
return (type === 'ascend') ? ( a > b ? 1 : -1) : (b > a ? 1 : -1);
} );
this.currentChange = true;
this.allData = [...this.allData];
}
// filter close (confirm or reset)
filterClose(flag: string, header: any){
const name = header.colEname;
header._checked = [];
if (flag === 'confirm') {
this.headerFilter[name].forEach(x => x.checked && header._checked.push(x.text));
} else if (flag === 'reset') {
this.headerFilter[name].forEach(x => x.checked = false);
header._filterVal = null;
}
header._filterVisible = false;
this.filterData( header._checked, name);
}
// apply filter
filterData(data: any[]|any, name: string){
this.filterSearch[name] = [...( Array.isArray(data) && data || [])];
if (!Array.isArray(data) || (data.length < 1) ) { // reset this column
this.updateData.forEach((item: any) => item.checked = false); // reset selection
this.updateData = [];
this.allChecked = false;
this.indeterminate = false;
this.filterSearch[name] = [];
this.allData = [...this.constData];
} else { // confirm
this.allData = this.constData.filter((x: any) => {
const xName = x[name] !== null && x[name] !== undefined && x[name].toString().trim() || '';
return this.filterSearch[name].indexOf(xName) !== -1;
})
}
// re-apply the selections already made on other columns
const arr = [...this.allData];
this.allData.map((x: any, index: number) => {
Object.keys(this.filterSearch).map((y: string) => {
if (y !== name.trim()) {
const xName = x[y] !== null && x[y] !== undefined && x[y].toString().trim() || '';
if ( (this.filterSearch[y].length > 0) && (this.filterSearch[y].indexOf(xName) === -1) ) {
arr[index] = undefined;
}
}
});
});
this.allData = arr.filter((x: any) => x !== undefined);
this.currentChange = true;
this.headerFilterData(this.allData);
}
// colDrag: column drag resize
coldrag(data: any) {
let tableWidth = parseFloat(this.tableWidth);
const currentWidth = parseFloat(this.dataHeader[data.index].width);
tableWidth = tableWidth - currentWidth;
this.tableWidth = `${tableWidth + data.nex}px`;
this.dataHeader[data.index].width = `${data.nex}px`;
}
ngOnDestroy() {
if (this.gloColSub) { this.gloColSub.unsubscribe(); }
if (this.gloPageSub) { this.gloPageSub.unsubscribe(); }
if (this.gloSelectedSub) { this.gloSelectedSub.unsubscribe(); }
}
// static: fetch static option data
getSelectData(data: Array<any>, valueSetCode: string, item: any) {
if (this.staticCode) {
this.staticCode(valueSetCode, item).subscribe( x => {
Array.prototype.push.apply(data, x);
})
return;
}
this.http.post(urls.static, {valueSetCode: valueSetCode}).then(
(res: any) => {
if (res.success) {
Array.prototype.push.apply(data, res.data.data.data);
}
}
);
}
private headerFilterData(data: any) {
// build the per-column filter data
const hadFilter = Object.keys(this.filterSearch).map((x: string) => (this.filterSearch[x].length > 0) && x);
window.setTimeout(() => {
let colHad = {}, tempStr: string;
this.dataHeader.map((y: any) => {
if (!hadFilter.includes(y.colEname)) { this.headerFilter[y.colEname] = []; }
});
data.map((x: any) => {
this.dataHeader.map((y: any) => {
const yname = (x[y.colEname] === '' || x[y.colEname] == null || x[y.colEname] === undefined) ? '' : x[y.colEname];
tempStr = y.colEname + yname;
if (!colHad[tempStr] && !hadFilter.includes(y.colEname)) {
const xName = yname.toString().trim() || '';
this.headerFilter[y.colEname].push({'text': xName, 'value': xName});
colHad[tempStr] = true;
}
});
});
colHad = undefined; // clear
});
}
private dataFilterResult() {
let u: any[] = [...this.allData];
let enterBool = false;
for (const i in this.filterSearch) {
if(this.filterSearch[i].length > 0) {
enterBool = true;
u = u.filter((m: any) => this.filterSearch[i].includes(m[i] || ''));
}
}
if (enterBool) { this.allData = [...u]; }
}
/**
* tab change event
* @param param
*/
tabIndexChangeEvent(param: number) {
this.tabIndexChange.emit(param);
}
keyUp(data: any, tableData: any) {
this.keyboardEmit.emit({keyboardData: data, tableData: tableData});
}
isNotSelected(data: any,colEname:string): boolean {
return !this.allData.some(value1 => value1[colEname] === data.value);
}
}
|
conditional_block
|
||
table-form.component.ts
|
Input() paginationRef: TemplateRef<any>;
@Input() tdTemplate:TemplateRef<any>;
@Input() set pageSizeOptions(val: number[]) { // custom page size options
this._pageSizeOptions = val;
}
get pageSizeOptions() {
return this._pageSizeOptions;
}
@Input() colSet = true; // whether the column settings control is shown
@Input() isResetFilter: boolean; // whether to reset the filter
// table data
@Input() set dataSet(val: any[]) { // only accepts values passed in from outside
if (this.isResetFilter) {
this.filterSearch = {}; // reset filter; clears the stored selections
}
this.currentChange = false;
this.allData = [];
this.constData = Array.isArray(val) ? val : []; // row count must not mutate
window.setTimeout(() => {
this.allData = [...this.constData];
this.dataFilterResult();
this.headerFilterData(this.allData);
},300);
}
// total row count
@Input() set totalPage(val: number) {
this._totalPage = val;
} // total row count
get totalPage() {
return this._totalPage;
}
@Input() listLoading: boolean; // list loading state
@Input() pageFun: boolean; // true: caller handles page number and size itself
@Input() set gridOperate(val: any) { // custom table actions
if (Utils.isObject(val)) {
this._gridOperate.show = val.show;
this._gridOperate.title = val.title || '操作';
this._gridOperate.width = val.width || '120px';
this._gridOperate.template = val.template;
}
} // actions config {show:false,title:'操作',width:'120px',template:''};
@Input() caculateEnameArr: any = [
];
@Input() set refresh(val:any)
|
refreshStatus();
}
@Input() popData: any;
@Input() popTableData: any = [];
@Input() searchParamFiled: any; // parameter name the pop dialog sends to the API
@Input() searchParamFiledNot: any; // optional pop dialog API parameter names, format {eName: true}
@Input() tableTitle: string|TemplateRef<void>; // table title
@Input() tableFooter: string|TemplateRef<void>; // table footer
@Input() selectedChange = false; // subscribe to select-all events
@Input() searchListFiled: any; // which field of the current row the pop dialog reads
@Input() staticCode: (data: string, item?: any) => Observable<any[]>; // custom static data inside the component [{name: null, value: null}]
@Input() needStaticCode: boolean; // set true when static data is needed but there is no apiParam
@Output() updateDataResult = new EventEmitter<any>(); // selected data result and row checkbox click event
@Output() inputBlurFun = new EventEmitter<any>(); // input blur
@Output() modelChange = new EventEmitter<any>(); // input value change event
@Output() listClick = new EventEmitter<any>(); // list click event
@Output() listOver = new EventEmitter<any>(); // list mouse enter event
@Output() listLeave = new EventEmitter<any>(); // list mouse leave event
@Output() pageIndexEmit = new EventEmitter<any>(); // page number click event
@Output() pageSizeEmit = new EventEmitter<any>(); // page size click event
@Output() userColumnsEmit = new EventEmitter<any>(); // emits header data
@Output() currentPageDataChangeEmit = new EventEmitter<any>(); // current page data updated
@Output() selectedChangeEmit = new EventEmitter<any>(); // check event, emits the data
@Output() keyboardEmit = new EventEmitter<any>()
constructor(private http: HttpUtilService, private info: UserinfoService, private globalSer: GlobalService,
@Host() private rowSource: GridRowSource) {
}
private restoreRender(item: any) {
if (item.type === 'template') {
const tplName = `tpl-${item.gridId}-${item.colEname}`;
item.template = this.rowSource.getRow(tplName);
if (!item.template) {
console.error(`template column misconfigured! templateId:${tplName}`);
}
}
}
ngOnInit() {
this.virtualMinBuffer = parseInt(this.selfTableHeight || this.tableHeight, 0);
this.virtualMaxBuffer = this.virtualMinBuffer + 100;
this.curFormId = this.formId || this.info.APPINFO.formId;
if (!this._columns) {
this.getUserColumns({formId: this.curFormId, userId: this.info.APPINFO.USER.userId,gridId: this.gridId});
}
this.gloPageSub = this.globalSer.pageNumEmitter.subscribe( (x: any) => {
if (this.curFormId === x.formId) {
this.pageIndex = x.page;
}
});
this.gloColSub = this.globalSer.colChangeEmitter.subscribe(
(res: any) => this.getUserColumns({formId: this.curFormId, userId: this.info.APPINFO.USER.userId, gridId: this.gridId})
);
for (const c of this.caculateEnameArr) {
this._calTotalFiled[c.field] = toDemical(0, c.demLength);
c.revStr = `${c.tipInfo} ${this._calTotalFiled[c.field]} ${c.tipInfoType}`;
}
if (this.selectedChange) {
this.gloSelectedSub = this.globalSer.tableSelectedChangeEmitter.subscribe(
res => {
if (res.gridId === this.gridId) {
this.checkAll(res.checked)
}
}
);
}
this.globalSer.tableGridIdToSearchForm.emit({'gridId': this.gridId, 'formId': this.curFormId}); // pass gridId to the search area;
this.globalSer.routerEvent.subscribe((x: any) => {
if (x.isSys) {
this.nzTableComponent.cdkVirtualScrollViewport.scrollToIndex(0);
}
});
this.globalSer.pageNumEmitter.emit(
{formId: this.curFormId, gridId: this.gridId, page: 1, length: this.pageSize, search: true}
); // initial page size
}
tableHeightFun(data: number) { // table auto-fit emits the height
this.tableHeight = `${data}px`;
this.currentTableHeightFun.emit(data);
this.virtualMinBuffer = data;
this.virtualMaxBuffer = this.virtualMinBuffer + 100;
}
// selection logic
refreshStatus(data?: any): void {
let currentChecked: boolean;
if (data && !this.isMutli) {
currentChecked = data.checked;
}
this.allData.map((y: any) => !this.isMutli && (y.checked = false));
if (data && !this.isMutli) {
data.checked = currentChecked; // restore checked in single-select mode;
}
this.allCheckBoxStyle();
this.updateData = this.constData.filter((x: any) => x.checked);
this.updateDataResult.emit(this.updateData); // selected result data
// total up the fields that need totals
for (const c of this.caculateEnameArr){
this._calTotalFiled[c.field] = toDemical(0, c.demLength);
// for (let elem of this.updateData) {
// // this._calTotalFiled[c.field] += Number(elem[c.field]);
// this._calTotalFiled[c.field] = toDemical(Utils.add(this._calTotalFiled[c.field], elem[c.field]),c.demLength);
// }
// 2019-01-14 Zheng Xin: round only the final sum instead of rounding each addition
this._calTotalFiled[c.field] = toDemical(
this.updateData.map(item => Number(item[c.field]) || 0).reduce((acc, cur) => acc + cur, 0), c.demLength
);
c.revStr = `${c.tipInfo} ${this._calTotalFiled[c.field]} ${c.tipInfoType}`;
}
if (data) {
this.selectedChangeEmit.emit([data]);
} else {
this.selectedChangeEmit.emit(this.allData);
}
}
private allCheckBoxStyle() {
const dataArr = this.allData.filter(value => !value.disabled);
const allChecked = dataArr[0] ? dataArr.every(value => value.checked === true) : false;
const allUnChecked = dataArr.every(value => !value.checked);
this.allChecked = allChecked;
this.indeterminate = (!allChecked) && (!allUnChecked);
}
checkAll(value: boolean): void {
this.allData = this.allData ? this.allData : [];
this.allData.forEach(data => {
|
{ // used when the table initializes with rows already selected; to re-trigger on a selected-count change, the value must change
this.
|
identifier_body
|
table-form.component.ts
|
])
])
]*/
})
export class TableFormComponent implements OnInit,OnDestroy {
// tempfindSet: any = { "parameter": "companyName", "parameterSend": "companyId", "name": "invoice title", "formId": "company_pop" };
// config format for the parameters passed into the data pop dialog
@ViewChild('nzTable') nzTableComponent: NzTableComponent;
private gloPageSub: Subscription;
private gloColSub: Subscription;
private gloSelectedSub: Subscription;
private _columns: any[];
tableHeight = '500px';
tableWidth = '100%';
allChecked = false;
indeterminate = false;
updateData: Array<any> = [];
dataHeader: Array<any> = []; // table header
dataHeaderRefresh = false; // whether the header was refreshed
pageIndex = 1; // current page number
_trSelected: boolean[] = [];
_gridOperate: any = {}; // actions config {show:false,title:'操作',width:'120px',template:''};
_calTotalFiled: any = {};
_totalPage = 0;
_pageSizeOptions: number[] = [30, 100, 500, 1000, 5000, 1000000]; // 1000000 shows all
headerFilter: any = {}; // filter data
filterSearch: any = {}; // filter conditions
allData: any[] = [];
curFormId: string;
virtualMinBuffer: number;
virtualMaxBuffer: number;
private constData: any[] = [];
private currentChange = false;
private trSelectHand = false; // whether trSelected was set externally
@Input() noGetStaticData: boolean; // set true to skip fetching static data
@Input() closeBgTr = true; // whether the alternating row background is shown
@Input() selfTableHeight: string | null | undefined; // table height auto-fits; set this custom height if auto-fit is not wanted
@Input() extraTableHeight: number; // extra page height in normal mode
@Output() currentTableHeightFun = new EventEmitter<number>(); // if the table height auto-fits, emits the table height
@Output() inpEmit = new EventEmitter<any>();
@Input() tabArr: any[] = []; // tab header data, do not use
@Input() tabIndex = 0; // active panel, defaults to the first
@Output() tabIndexChange: EventEmitter<number> = new EventEmitter<number>();
@Input() updateHeader = false; // whether clicking a tab refreshes the header
@Output() tabResultFun = new EventEmitter<any>(); // tab header click event
@Input() set columns(val: any[]) { // custom header [{colCname:'Chinese name',colEname:'English name',visible:'XSBJ10'|'XSBJ20'}]
this._columns = val;
if (Array.isArray(this._columns)) { this.columnsFilter(this._columns); }
}
get columns() {
return this._columns;
}
@Input() checkBoxWidth: string;
@Input() listWidth: string; // list width
@Input() trSelectedShow = false; // whether selected rows show a highlight style
@Input() set trSelected(val: boolean[]) { // indexed, to tell rows apart
if (Array.isArray(val)) {
this._trSelected = val;
this.trSelectHand = true;
}
}
get trSelected() {
return this._trSelected;
}
@Input() formId: string; // formId takes precedence
@Input() gridId: string; // required when a page has several tables; pass gridId to tell them apart,
@Input() isCheckBox = true; // whether checkboxes are shown, shown by default
@Input() disabledAllCheckBox: boolean = false; // whether select-all is disabled, allowed by default
@Input() isMutli: boolean; // whether the checkboxes allow multi-select
@Input() nzShowPagination = true; // whether the pager is shown, shown by default
@Input() showCount = true; // whether the selected count is shown
@Input() pageSize = 100; // page size, default 100
@Input() paginationRef: TemplateRef<any>;
@Input() tdTemplate:TemplateRef<any>;
@Input() set pageSizeOptions(val: number[]) { // custom page size options
this._pageSizeOptions = val;
}
get pageSizeOptions() {
return this._pageSizeOptions;
}
@Input() colSet = true; // whether the column settings control is shown
@Input() isResetFilter: boolean; // whether to reset the filter
// table data
@Input() set dataSet(val: any[]) { // only accepts values passed in from outside
if (this.isResetFilter) {
this.filterSearch = {}; // reset filter; clears the stored selections
}
this.currentChange = false;
this.allData = [];
this.constData = Array.isArray(val) ? val : []; // row count must not mutate
window.setTimeout(() => {
this.allData = [...this.constData];
this.dataFilterResult();
this.headerFilterData(this.allData);
},300);
}
// total row count
@Input() set totalPage(val: number) {
this._totalPage = val;
} // total row count
get totalPage() {
return this._totalPage;
}
@Input() listLoading: boolean; // list loading state
@Input() pageFun: boolean; // true: caller handles page number and size itself
@Input() set gridOperate(val: any) { // custom table actions
if (Utils.isObject(val)) {
this._gridOperate.show = val.show;
this._gridOperate.title = val.title || '操作';
this._gridOperate.width = val.width || '120px';
this._gridOperate.template = val.template;
}
} // actions config {show:false,title:'操作',width:'120px',template:''};
@Input() caculateEnameArr: any = [
];
@Input() set refresh(val:any){ // used when the table initializes with rows already selected; to re-trigger on a selected-count change, the value must change
this.refreshStatus();
}
@Input() popData: any;
@Input() popTableData: any = [];
@Input() searchParamFiled: any; // parameter name the pop dialog sends to the API
@Input() searchParamFiledNot: any; // optional pop dialog API parameter names, format {eName: true}
@Input() tableTitle: string|TemplateRef<void>; // table title
@Input() tableFooter: string|TemplateRef<void>; // table footer
@Input() selectedChange = false; // subscribe to select-all events
@Input() searchListFiled: any; // which field of the current row the pop dialog reads
@Input() staticCode: (data: string, item?: any) => Observable<any[]>; // custom static data inside the component [{name: null, value: null}]
@Input() needStaticCode: boolean; // set true when static data is needed but there is no apiParam
@Output() updateDataResult = new EventEmitter<any>(); // selected data result and row checkbox click event
@Output() inputBlurFun = new EventEmitter<any>(); // input blur
@Output() modelChange = new EventEmitter<any>(); // input value change event
@Output() listClick = new EventEmitter<any>(); // list click event
@Output() listOver = new EventEmitter<any>(); // list mouse enter event
@Output() listLeave = new EventEmitter<any>(); // list mouse leave event
@Output() pageIndexEmit = new EventEmitter<any>(); // page number click event
@Output() pageSizeEmit = new EventEmitter<any>(); // page size click event
@Output() userColumnsEmit = new EventEmitter<any>(); // emits header data
@Output() currentPageDataChangeEmit = new EventEmitter<any>(); // current page data updated
@Output() selectedChangeEmit = new EventEmitter<any>(); // check event, emits the data
@Output() keyboardEmit = new EventEmitter<any>()
constructor(private http: HttpUtilService, private info: UserinfoService, private globalSer: GlobalService,
@Host() private rowSource: GridRowSource) {
}
private restoreRender(item: any) {
if (item.type === 'template') {
const tplName = `tpl-${item.gridId}-${item.colEname}`;
item.template = this.rowSource.getRow(tplName);
if (!
|
style({opacity:0,height:0,transform:'translate(30px,0)'}),
animate('0.3s ease-in',style({opacity:1,height:'auto',transform:'translate(0,0)',background:'#fffeee'}))
]),
transition(':leave',[
animate('0.3s ease-out',style({opacity:0,height:0,transform:'translate(30px,0)'}))
|
random_line_split
|
|
table-form.component.ts
|
Input() paginationRef: TemplateRef<any>;
@Input() tdTemplate:TemplateRef<any>;
@Input() set pageSizeOptions(val: number[]) { // custom page size options
this._pageSizeOptions = val;
}
get pageSizeOptions() {
return this._pageSizeOptions;
}
@Input() colSet = true; // whether the column settings control is shown
@Input() isResetFilter: boolean; // whether to reset the filter
// table data
@Input() set dataSet(val: any[]) { // only accepts values passed in from outside
if (this.isResetFilter) {
this.filterSearch = {}; // reset filter; clears the stored selections
}
this.currentChange = false;
this.allData = [];
this.constData = Array.isArray(val) ? val : []; // row count must not mutate
window.setTimeout(() => {
this.allData = [...this.constData];
this.dataFilterResult();
this.headerFilterData(this.allData);
},300);
}
// total row count
@Input() set totalPage(val: number) {
this._totalPage = val;
} // total row count
get totalPage() {
return this._totalPage;
}
@Input() listLoading: boolean; // list loading state
@Input() pageFun: boolean; // true: caller handles page number and size itself
@Input() set gridOperate(val: any) { // custom table actions
if (Utils.isObject(val)) {
this._gridOperate.show = val.show;
this._gridOperate.title = val.title || '操作';
this._gridOperate.width = val.width || '120px';
this._gridOperate.template = val.template;
}
} // actions config {show:false,title:'操作',width:'120px',template:''};
@Input() caculateEnameArr: any = [
];
@Input() set refresh(val:any){ // used when the table initializes with rows already selected; to re-trigger on a selected-count change, the value must change
this.refresh
|
}
@Input() popData: any;
@Input() popTableData: any = [];
@Input() searchParamFiled: any; // parameter name the pop dialog sends to the API
@Input() searchParamFiledNot: any; // optional pop dialog API parameter names, format {eName: true}
@Input() tableTitle: string|TemplateRef<void>; // table title
@Input() tableFooter: string|TemplateRef<void>; // table footer
@Input() selectedChange = false; // subscribe to select-all events
@Input() searchListFiled: any; // which field of the current row the pop dialog reads
@Input() staticCode: (data: string, item?: any) => Observable<any[]>; // custom static data inside the component [{name: null, value: null}]
@Input() needStaticCode: boolean; // set true when static data is needed but there is no apiParam
@Output() updateDataResult = new EventEmitter<any>(); // selected data result and row checkbox click event
@Output() inputBlurFun = new EventEmitter<any>(); // input blur
@Output() modelChange = new EventEmitter<any>(); // input value change event
@Output() listClick = new EventEmitter<any>(); // list click event
@Output() listOver = new EventEmitter<any>(); // list mouse enter event
@Output() listLeave = new EventEmitter<any>(); // list mouse leave event
@Output() pageIndexEmit = new EventEmitter<any>(); // page number click event
@Output() pageSizeEmit = new EventEmitter<any>(); // page size click event
@Output() userColumnsEmit = new EventEmitter<any>(); // emits header data
@Output() currentPageDataChangeEmit = new EventEmitter<any>(); // current page data updated
@Output() selectedChangeEmit = new EventEmitter<any>(); // check event, emits the data
@Output() keyboardEmit = new EventEmitter<any>()
constructor(private http: HttpUtilService, private info: UserinfoService, private globalSer: GlobalService,
@Host() private rowSource: GridRowSource) {
}
private restoreRender(item: any) {
if (item.type === 'template') {
const tplName = `tpl-${item.gridId}-${item.colEname}`;
item.template = this.rowSource.getRow(tplName);
if (!item.template) {
console.error(`template column misconfigured! templateId:${tplName}`);
}
}
}
ngOnInit() {
this.virtualMinBuffer = parseInt(this.selfTableHeight || this.tableHeight, 0);
this.virtualMaxBuffer = this.virtualMinBuffer + 100;
this.curFormId = this.formId || this.info.APPINFO.formId;
if (!this._columns) {
this.getUserColumns({formId: this.curFormId, userId: this.info.APPINFO.USER.userId,gridId: this.gridId});
}
this.gloPageSub = this.globalSer.pageNumEmitter.subscribe( (x: any) => {
if (this.curFormId === x.formId) {
this.pageIndex = x.page;
}
});
this.gloColSub = this.globalSer.colChangeEmitter.subscribe(
(res: any) => this.getUserColumns({formId: this.curFormId, userId: this.info.APPINFO.USER.userId, gridId: this.gridId})
);
for (const c of this.caculateEnameArr) {
this._calTotalFiled[c.field] = toDemical(0, c.demLength);
c.revStr = `${c.tipInfo} ${this._calTotalFiled[c.field]} ${c.tipInfoType}`;
}
if (this.selectedChange) {
this.gloSelectedSub = this.globalSer.tableSelectedChangeEmitter.subscribe(
res => {
if (res.gridId === this.gridId) {
this.checkAll(res.checked)
}
}
);
}
this.globalSer.tableGridIdToSearchForm.emit({'gridId': this.gridId, 'formId': this.curFormId}); // pass gridId to the search area;
this.globalSer.routerEvent.subscribe((x: any) => {
if (x.isSys) {
this.nzTableComponent.cdkVirtualScrollViewport.scrollToIndex(0);
}
});
this.globalSer.pageNumEmitter.emit(
{formId: this.curFormId, gridId: this.gridId, page: 1, length: this.pageSize, search: true}
); // initial page size
}
tableHeightFun(data: number) { // table auto-fit emits the height
this.tableHeight = `${data}px`;
this.currentTableHeightFun.emit(data);
this.virtualMinBuffer = data;
this.virtualMaxBuffer = this.virtualMinBuffer + 100;
}
// selection logic
refreshStatus(data?: any): void {
let currentChecked: boolean;
if (data && !this.isMutli) {
currentChecked = data.checked;
}
this.allData.map((y: any) => !this.isMutli && (y.checked = false));
if (data && !this.isMutli) {
data.checked = currentChecked; // restore checked in single-select mode;
}
this.allCheckBoxStyle();
this.updateData = this.constData.filter((x: any) => x.checked);
this.updateDataResult.emit(this.updateData); // selected result data
// total up the fields that need totals
for (const c of this.caculateEnameArr){
this._calTotalFiled[c.field] = toDemical(0, c.demLength);
// for (let elem of this.updateData) {
// // this._calTotalFiled[c.field] += Number(elem[c.field]);
// this._calTotalFiled[c.field] = toDemical(Utils.add(this._calTotalFiled[c.field], elem[c.field]),c.demLength);
// }
// 2019-01-14 Zheng Xin: round only the final sum instead of rounding each addition
this._calTotalFiled[c.field] = toDemical(
this.updateData.map(item => Number(item[c.field]) || 0).reduce((acc, cur) => acc + cur, 0), c.demLength
);
c.revStr = `${c.tipInfo} ${this._calTotalFiled[c.field]} ${c.tipInfoType}`;
}
if (data) {
this.selectedChangeEmit.emit([data]);
} else {
this.selectedChangeEmit.emit(this.allData);
}
}
private allCheckBoxStyle() {
const dataArr = this.allData.filter(value => !value.disabled);
const allChecked = dataArr[0] ? dataArr.every(value => value.checked === true) : false;
const allUnChecked = dataArr.every(value => !value.checked);
this.allChecked = allChecked;
this.indeterminate = (!allChecked) && (!allUnChecked);
}
checkAll(value: boolean): void {
this.allData = this.allData ? this.allData : [];
this.allData.forEach(data => {
if
|
Status();
|
identifier_name
|
itemUpdates.ts
|
.ts data, etc.
*
* To limit the hack, this should only be accessed from the proxy back-end, not
* from the Electron front-end.
*/
let localItems = _.clone(items);
export const localItemsById = _.zipObject(
items.map((i) => i.id),
localItems,
);
interface PrizeItem {
type_name: schemas.ItemTypeName;
name: string;
id: number;
image_path: string;
}
function showUnknownItem(item: PrizeItem) {
logger.warn(`Unknown ${item.type_name.toLowerCase()}: ${item.name} (ID ${item.id})`);
}
function addLocalItem({ name, type_name, id }: PrizeItem) {
const newItem = { name, type: type_name.toLowerCase() as ItemType, id };
localItems.push(newItem);
localItems = _.sortBy(localItems, 'id');
localItemsById[id] = newItem;
}
function showLocalItem(item: PrizeItem) {
const type = ItemTypeLookup[item.type_name] || item.type_name;
logger.info(
'New (previously unknown) item:\n' +
`{\n name: "${item.name}",\n type: ItemType.${type},\n id: ${item.id}\n},`,
);
}
function showLocalDressRecord({
dress_record_id,
name,
buddy_id,
}: {
dress_record_id: number;
name: string;
buddy_id: number;
}) {
logger.info(
'New (previously unknown) dress record:\n' +
`{\n name: "${name}",\n id: ${dress_record_id},\n characterId: ${buddy_id},\n},`,
);
}
function checkKnownEnlir(item: PrizeItem, ...enlirData: any) {
if (_.every(enlirData, (i) => i[item.id] == null)) {
showUnknownItem(item);
}
}
function checkKnownDressRecord(item: PrizeItem) {
if (dressRecordsById[item.id] == null) {
const match = item.image_path.match(/(\d+)\/\d+\/\d+\.png/);
const buddyId = match ? +match[1] : 0;
showLocalDressRecord({ dress_record_id: item.id, name: item.name, buddy_id: buddyId });
}
}
function checkKnownItems(item: PrizeItem) {
if (localItemsById[item.id] == null) {
if (item.type_name === 'MUSIC_TICKET') {
// Music tickets are regularly released and are easy to dynamically add,
// so we won't try tracking them ourselves.
return;
}
showUnknownItem(item);
addLocalItem(item);
showLocalItem(item);
}
}
function
|
(item: PrizeItem) {
if (item.type_name === 'BEAST') {
checkKnownEnlir(item, enlir.magicites);
} else if (item.type_name === 'EQUIPMENT') {
checkKnownEnlir(item, enlir.relics, enlir.heroArtifacts);
} else if (item.type_name === 'ABILITY') {
checkKnownEnlir(item, enlir.abilities);
} else if (
item.type_name === 'BUDDY' ||
item.type_name === 'MEMORY_CRYSTAL' ||
item.type_name === 'RECORD_MATERIA'
) {
// FIXME: Need an internal-id-indexed version of characters, memory crystals, record materia
} else if (item.type_name === 'DRESS_RECORD') {
checkKnownDressRecord(item);
} else {
checkKnownItems(item);
}
}
function checkPartyItems(
partyItems: Array<{ name: string; id: number; image_path: string }>,
type: ItemType,
) {
for (const i of _.sortBy(partyItems, 'id')) {
checkKnownItems({
name: i.name,
id: i.id,
type_name: type.toUpperCase() as schemas.ItemTypeName,
image_path: i.image_path,
});
}
}
function checkPartyDressRecords(data: schemas.PartyList | schemas.PartyListOther) {
for (const i of _.sortBy(data.dress_records, 'dress_record_id')) {
if (dressRecordsById[i.dress_record_id] == null) {
showLocalDressRecord(i);
}
}
}
function checkAllPartyItems(data: schemas.PartyList | schemas.PartyListOther) {
// No need to check isRecordDungeonPartyList, as long as we're only
// looking for new / previously unknown items.
checkPartyItems(data.equipment_hyper_evolve_materials, ItemType.DarkMatter);
checkPartyItems(data.equipment_sp_materials, ItemType.UpgradeMaterial);
checkPartyItems(data.materials, ItemType.Orb);
checkPartyItems(data.grow_eggs, ItemType.GrowthEgg);
checkPartyItems(data.sphere_materials, ItemType.Mote);
checkPartyDressRecords(data);
}
function handleWinBattle(data: schemas.WinBattle) {
_.forEach(data.result.prize_master, (item) => {
checkItem({
id: +item.item_id,
type_name: item.type_name,
name: item.name,
image_path: item.image_path,
});
});
}
interface EnlirEntity {
id: number;
name: string;
gl: boolean;
realm: EnlirRealm | null;
}
interface CheckedEntity<T extends EnlirEntity> {
enlirItem: T;
updateRelease: boolean;
updateName: string | undefined;
}
function compareGlEntity<T1 extends { id: number; name: string }, T2 extends EnlirEntity>(
callback: (message: string) => void,
item: T1,
enlirItems: { [id: number]: T2 },
description: string,
source: string,
trim?: (name: string) => string,
): CheckedEntity<T2> | null {
const enlirItem = enlirItems[item.id];
if (!enlirItem) {
callback(`Item update: Unknown ${description} ID ${item.id}, ${item.name}, from ${source}`);
return null;
}
let updateRelease = false;
if (!enlirItem.gl) {
callback(`Item update: ${description} ID ${item.id}, ${item.name}, is now released in global`);
updateRelease = true;
}
const trimmedName = trim ? trim(item.name) : item.name.trimRight();
let updateName: string | undefined;
if (enlirItem.name !== trimmedName) {
callback(
`Item update: ${description} ID ${item.id}, ${item.name}, ` +
`is named ${enlirItem.name} in Enlir`,
);
updateName = item.name;
}
return {
enlirItem,
updateRelease,
updateName,
};
}
function removeRealm<T extends EnlirEntity>(enlirItem: T, name: string) {
if (!enlirItem.realm) {
return name;
}
const re = new RegExp(' \\(' + _.escapeRegExp(enlirItem.realm) + '\\)$');
return name.replace(re, '');
}
function showUpdateCommands<T extends EnlirEntity>(
checked: Array<CheckedEntity<T>>,
tabName: string,
callback: (message: string) => void,
) {
const releaseIds = checked.filter((i) => i.updateRelease).map((i) => i.enlirItem.id);
if (releaseIds.length) {
callback(`update-enlir.ts releaseInGl ${tabName} ${releaseIds.join(' ')}`);
}
const renames = checked
.filter((i) => i.updateName)
.map(
(i) =>
[i.enlirItem.id, '"' + removeRealm(i.enlirItem, i.updateName!) + '"'] as [number, string],
);
if (renames.length) {
callback(`update-enlir.ts rename ${tabName} ${_.flatten(renames).join(' ')}`);
}
}
function checkGlRelicDrawEquipment(
equipmentList: equipmentSchemas.Equipment[],
callback: (message: string) => void,
) {
const checkedRelics: Array<CheckedEntity<EnlirRelic>> = [];
const checkedSoulBreaks: Array<CheckedEntity<EnlirSoulBreak>> = [];
const checkedLegendMateria: Array<CheckedEntity<EnlirLegendMateria>> = [];
const trimRealm = (name: string) => {
return name
.replace(/ \(([IVX]+|Type-0|FFT|Beyond)\) *$/, '')
.replace(/ \([IVX]+-(.*?)\) *$/, ' ($1)');
};
for (const equipment of equipmentList) {
const { id, name, soul_strike, legend_materia } = equipment;
const relicName = `relic ${name} (ID ${id})`;
const compareRelic = compareGlEntity(
callback,
equipment,
enlir.relics,
'relic',
relicName,
trimRealm,
);
if (!compareRelic) {
continue;
}
checkedRelics.push(compareRelic);
if (soul_strike) {
const compareSoulBreak =
|
checkItem
|
identifier_name
|
itemUpdates.ts
|
data, etc.
*
* To limit the hack, this should only be accessed from the proxy back-end, not
* from the Electron front-end.
*/
let localItems = _.clone(items);
export const localItemsById = _.zipObject(
items.map((i) => i.id),
localItems,
);
interface PrizeItem {
type_name: schemas.ItemTypeName;
name: string;
id: number;
image_path: string;
}
function showUnknownItem(item: PrizeItem) {
logger.warn(`Unknown ${item.type_name.toLowerCase()}: ${item.name} (ID ${item.id})`);
}
function addLocalItem({ name, type_name, id }: PrizeItem) {
const newItem = { name, type: type_name.toLowerCase() as ItemType, id };
localItems.push(newItem);
localItems = _.sortBy(localItems, 'id');
localItemsById[id] = newItem;
}
function showLocalItem(item: PrizeItem) {
const type = ItemTypeLookup[item.type_name] || item.type_name;
logger.info(
'New (previously unknown) item:\n' +
`{\n name: "${item.name}",\n type: ItemType.${type},\n id: ${item.id}\n},`,
);
}
function showLocalDressRecord({
dress_record_id,
name,
buddy_id,
}: {
dress_record_id: number;
name: string;
buddy_id: number;
}) {
logger.info(
'New (previously unknown) dress record:\n' +
`{\n name: "${name}",\n id: ${dress_record_id},\n characterId: ${buddy_id},\n},`,
);
}
function checkKnownEnlir(item: PrizeItem, ...enlirData: any)
|
function checkKnownDressRecord(item: PrizeItem) {
if (dressRecordsById[item.id] == null) {
const match = item.image_path.match(/(\d+)\/\d+\/\d+\.png/);
const buddyId = match ? +match[1] : 0;
showLocalDressRecord({ dress_record_id: item.id, name: item.name, buddy_id: buddyId });
}
}
function checkKnownItems(item: PrizeItem) {
if (localItemsById[item.id] == null) {
if (item.type_name === 'MUSIC_TICKET') {
// Music tickets are regularly released and are easy to dynamically add,
// so we won't try tracking them ourselves.
return;
}
showUnknownItem(item);
addLocalItem(item);
showLocalItem(item);
}
}
function checkItem(item: PrizeItem) {
if (item.type_name === 'BEAST') {
checkKnownEnlir(item, enlir.magicites);
} else if (item.type_name === 'EQUIPMENT') {
checkKnownEnlir(item, enlir.relics, enlir.heroArtifacts);
} else if (item.type_name === 'ABILITY') {
checkKnownEnlir(item, enlir.abilities);
} else if (
item.type_name === 'BUDDY' ||
item.type_name === 'MEMORY_CRYSTAL' ||
item.type_name === 'RECORD_MATERIA'
) {
// FIXME: Need an internal-id-indexed version of characters, memory crystals, record materia
} else if (item.type_name === 'DRESS_RECORD') {
checkKnownDressRecord(item);
} else {
checkKnownItems(item);
}
}
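// Dispatch sketch: a 'BEAST' prize is checked against enlir.magicites,
// 'EQUIPMENT' against relics plus hero artifacts, 'ABILITY' against
// abilities, and any type without an Enlir index falls through to
// checkKnownItems, which may append it to localItems.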
function checkPartyItems(
partyItems: Array<{ name: string; id: number; image_path: string }>,
type: ItemType,
) {
for (const i of _.sortBy(partyItems, 'id')) {
checkKnownItems({
name: i.name,
id: i.id,
type_name: type.toUpperCase() as schemas.ItemTypeName,
image_path: i.image_path,
});
}
}
function checkPartyDressRecords(data: schemas.PartyList | schemas.PartyListOther) {
for (const i of _.sortBy(data.dress_records, 'dress_record_id')) {
if (dressRecordsById[i.dress_record_id] == null) {
showLocalDressRecord(i);
}
}
}
function checkAllPartyItems(data: schemas.PartyList | schemas.PartyListOther) {
// No need to check isRecordDungeonPartyList, as long as we're only
// looking for new / previously unknown items.
checkPartyItems(data.equipment_hyper_evolve_materials, ItemType.DarkMatter);
checkPartyItems(data.equipment_sp_materials, ItemType.UpgradeMaterial);
checkPartyItems(data.materials, ItemType.Orb);
checkPartyItems(data.grow_eggs, ItemType.GrowthEgg);
checkPartyItems(data.sphere_materials, ItemType.Mote);
checkPartyDressRecords(data);
}
function handleWinBattle(data: schemas.WinBattle) {
_.forEach(data.result.prize_master, (item) => {
checkItem({
id: +item.item_id,
type_name: item.type_name,
name: item.name,
image_path: item.image_path,
});
});
}
interface EnlirEntity {
id: number;
name: string;
gl: boolean;
realm: EnlirRealm | null;
}
interface CheckedEntity<T extends EnlirEntity> {
enlirItem: T;
updateRelease: boolean;
updateName: string | undefined;
}
function compareGlEntity<T1 extends { id: number; name: string }, T2 extends EnlirEntity>(
callback: (message: string) => void,
item: T1,
enlirItems: { [id: number]: T2 },
description: string,
source: string,
trim?: (name: string) => string,
): CheckedEntity<T2> | null {
const enlirItem = enlirItems[item.id];
if (!enlirItem) {
callback(`Item update: Unknown ${description} ID ${item.id}, ${item.name}, from ${source}`);
return null;
}
let updateRelease = false;
if (!enlirItem.gl) {
callback(`Item update: ${description} ID ${item.id}, ${item.name}, is now released in global`);
updateRelease = true;
}
const trimmedName = trim ? trim(item.name) : item.name.trimRight();
let updateName: string | undefined;
if (enlirItem.name !== trimmedName) {
callback(
`Item update: ${description} ID ${item.id}, ${item.name}, ` +
`is named ${enlirItem.name} in Enlir`,
);
updateName = item.name;
}
return {
enlirItem,
updateRelease,
updateName,
};
}
function removeRealm<T extends EnlirEntity>(enlirItem: T, name: string) {
if (!enlirItem.realm) {
return name;
}
const re = new RegExp(' \\(' + _.escapeRegExp(enlirItem.realm) + '\\)$');
return name.replace(re, '');
}
function showUpdateCommands<T extends EnlirEntity>(
checked: Array<CheckedEntity<T>>,
tabName: string,
callback: (message: string) => void,
) {
const releaseIds = checked.filter((i) => i.updateRelease).map((i) => i.enlirItem.id);
if (releaseIds.length) {
callback(`update-enlir.ts releaseInGl ${tabName} ${releaseIds.join(' ')}`);
}
const renames = checked
.filter((i) => i.updateName)
.map(
(i) =>
[i.enlirItem.id, '"' + removeRealm(i.enlirItem, i.updateName!) + '"'] as [number, string],
);
if (renames.length) {
callback(`update-enlir.ts rename ${tabName} ${_.flatten(renames).join(' ')}`);
}
}
function checkGlRelicDrawEquipment(
equipmentList: equipmentSchemas.Equipment[],
callback: (message: string) => void,
) {
const checkedRelics: Array<CheckedEntity<EnlirRelic>> = [];
const checkedSoulBreaks: Array<CheckedEntity<EnlirSoulBreak>> = [];
const checkedLegendMateria: Array<CheckedEntity<EnlirLegendMateria>> = [];
const trimRealm = (name: string) => {
return name
.replace(/ \(([IVX]+|Type-0|FFT|Beyond)\) *$/, '')
.replace(/ \([IVX]+-(.*?)\) *$/, ' ($1)');
};
for (const equipment of equipmentList) {
const { id, name, soul_strike, legend_materia } = equipment;
const relicName = `relic ${name} (ID ${id})`;
const compareRelic = compareGlEntity(
callback,
equipment,
enlir.relics,
'relic',
relicName,
trimRealm,
);
if (!compareRelic) {
continue;
}
checkedRelics.push(compareRelic);
if (soul_strike) {
const compareSoulBreak =
|
{
if (_.every(enlirData, (i) => i[item.id] == null)) {
showUnknownItem(item);
}
}
|
identifier_body
|
itemUpdates.ts
|
.info(
'New (previously unknown) item:\n' +
`{\n name: "${item.name}",\n type: ItemType.${type},\n id: ${item.id}\n},`,
);
}
function showLocalDressRecord({
dress_record_id,
name,
buddy_id,
}: {
dress_record_id: number;
name: string;
buddy_id: number;
}) {
logger.info(
'New (previously unknown) dress record:\n' +
`{\n name: "${name}",\n id: ${dress_record_id},\n characterId: ${buddy_id},\n},`,
);
}
function checkKnownEnlir(item: PrizeItem, ...enlirData: any) {
if (_.every(enlirData, (i) => i[item.id] == null)) {
showUnknownItem(item);
}
}
function checkKnownDressRecord(item: PrizeItem) {
if (dressRecordsById[item.id] == null) {
const match = item.image_path.match(/(\d+)\/\d+\/\d+\.png/);
const buddyId = match ? +match[1] : 0;
showLocalDressRecord({ dress_record_id: item.id, name: item.name, buddy_id: buddyId });
}
}
function checkKnownItems(item: PrizeItem) {
if (localItemsById[item.id] == null) {
if (item.type_name === 'MUSIC_TICKET') {
// Music tickets are regularly released and are easy to dynamically add,
// so we won't try tracking them ourselves.
return;
}
showUnknownItem(item);
addLocalItem(item);
showLocalItem(item);
}
}
function checkItem(item: PrizeItem) {
if (item.type_name === 'BEAST') {
checkKnownEnlir(item, enlir.magicites);
} else if (item.type_name === 'EQUIPMENT') {
checkKnownEnlir(item, enlir.relics, enlir.heroArtifacts);
} else if (item.type_name === 'ABILITY') {
checkKnownEnlir(item, enlir.abilities);
} else if (
item.type_name === 'BUDDY' ||
item.type_name === 'MEMORY_CRYSTAL' ||
item.type_name === 'RECORD_MATERIA'
) {
// FIXME: Need an internal-id-indexed version of characters, memory crystals, record materia
} else if (item.type_name === 'DRESS_RECORD') {
checkKnownDressRecord(item);
} else {
checkKnownItems(item);
}
}
function checkPartyItems(
partyItems: Array<{ name: string; id: number; image_path: string }>,
type: ItemType,
) {
for (const i of _.sortBy(partyItems, 'id')) {
checkKnownItems({
name: i.name,
id: i.id,
type_name: type.toUpperCase() as schemas.ItemTypeName,
image_path: i.image_path,
});
}
}
function checkPartyDressRecords(data: schemas.PartyList | schemas.PartyListOther) {
for (const i of _.sortBy(data.dress_records, 'dress_record_id')) {
if (dressRecordsById[i.dress_record_id] == null) {
showLocalDressRecord(i);
}
}
}
function checkAllPartyItems(data: schemas.PartyList | schemas.PartyListOther) {
// No need to check isRecordDungeonPartyList, as long as we're only
// looking for new / previously unknown items.
checkPartyItems(data.equipment_hyper_evolve_materials, ItemType.DarkMatter);
checkPartyItems(data.equipment_sp_materials, ItemType.UpgradeMaterial);
checkPartyItems(data.materials, ItemType.Orb);
checkPartyItems(data.grow_eggs, ItemType.GrowthEgg);
checkPartyItems(data.sphere_materials, ItemType.Mote);
checkPartyDressRecords(data);
}
function handleWinBattle(data: schemas.WinBattle) {
_.forEach(data.result.prize_master, (item) => {
checkItem({
id: +item.item_id,
type_name: item.type_name,
name: item.name,
image_path: item.image_path,
});
});
}
interface EnlirEntity {
id: number;
name: string;
gl: boolean;
realm: EnlirRealm | null;
}
interface CheckedEntity<T extends EnlirEntity> {
enlirItem: T;
updateRelease: boolean;
updateName: string | undefined;
}
function compareGlEntity<T1 extends { id: number; name: string }, T2 extends EnlirEntity>(
callback: (message: string) => void,
item: T1,
enlirItems: { [id: number]: T2 },
description: string,
source: string,
trim?: (name: string) => string,
): CheckedEntity<T2> | null {
const enlirItem = enlirItems[item.id];
if (!enlirItem) {
callback(`Item update: Unknown ${description} ID ${item.id}, ${item.name}, from ${source}`);
return null;
}
let updateRelease = false;
if (!enlirItem.gl) {
callback(`Item update: ${description} ID ${item.id}, ${item.name}, is now released in global`);
updateRelease = true;
}
const trimmedName = trim ? trim(item.name) : item.name.trimRight();
let updateName: string | undefined;
if (enlirItem.name !== trimmedName) {
callback(
`Item update: ${description} ID ${item.id}, ${item.name}, ` +
`is named ${enlirItem.name} in Enlir`,
);
updateName = item.name;
}
return {
enlirItem,
updateRelease,
updateName,
};
}
function removeRealm<T extends EnlirEntity>(enlirItem: T, name: string) {
if (!enlirItem.realm) {
return name;
}
const re = new RegExp(' \\(' + _.escapeRegExp(enlirItem.realm) + '\\)$');
return name.replace(re, '');
}
function showUpdateCommands<T extends EnlirEntity>(
checked: Array<CheckedEntity<T>>,
tabName: string,
callback: (message: string) => void,
) {
const releaseIds = checked.filter((i) => i.updateRelease).map((i) => i.enlirItem.id);
if (releaseIds.length) {
callback(`update-enlir.ts releaseInGl ${tabName} ${releaseIds.join(' ')}`);
}
const renames = checked
.filter((i) => i.updateName)
.map(
(i) =>
[i.enlirItem.id, '"' + removeRealm(i.enlirItem, i.updateName!) + '"'] as [number, string],
);
if (renames.length) {
callback(`update-enlir.ts rename ${tabName} ${_.flatten(renames).join(' ')}`);
}
}
function checkGlRelicDrawEquipment(
equipmentList: equipmentSchemas.Equipment[],
callback: (message: string) => void,
) {
const checkedRelics: Array<CheckedEntity<EnlirRelic>> = [];
const checkedSoulBreaks: Array<CheckedEntity<EnlirSoulBreak>> = [];
const checkedLegendMateria: Array<CheckedEntity<EnlirLegendMateria>> = [];
const trimRealm = (name: string) => {
return name
.replace(/ \(([IVX]+|Type-0|FFT|Beyond)\) *$/, '')
.replace(/ \([IVX]+-(.*?)\) *$/, ' ($1)');
};
for (const equipment of equipmentList) {
const { id, name, soul_strike, legend_materia } = equipment;
const relicName = `relic ${name} (ID ${id})`;
const compareRelic = compareGlEntity(
callback,
equipment,
enlir.relics,
'relic',
relicName,
trimRealm,
);
if (!compareRelic) {
continue;
}
checkedRelics.push(compareRelic);
if (soul_strike) {
const compareSoulBreak = compareGlEntity(
callback,
soul_strike,
enlir.soulBreaks,
'soul break',
relicName,
);
if (compareSoulBreak) {
checkedSoulBreaks.push(compareSoulBreak);
}
}
if (legend_materia) {
const compareLegendMateria = compareGlEntity(
callback,
legend_materia,
enlir.legendMateria,
'legend materia',
relicName,
trimRealm,
);
if (compareLegendMateria) {
checkedLegendMateria.push(compareLegendMateria);
}
}
}
showUpdateCommands(checkedRelics, 'relics', callback);
showUpdateCommands(checkedSoulBreaks, 'soulBreaks', callback);
|
showUpdateCommands(checkedLegendMateria, 'legendMateria', callback);
}
function getGachaShowEquipment(data: gachaSchemas.GachaShow, currentTime: number) {
const equipmentList: equipmentSchemas.Equipment[] = [];
|
random_line_split
|
|
goog-varint.ts
|
) {
this.assertBounds();
return [lowBits, highBits];
}
}
let middleByte = this.buf[this.pos++];
// last four bits of the first 32 bit number
lowBits |= (middleByte & 0x0F) << 28;
// 3 upper bits are part of the next 32 bit number
highBits = (middleByte & 0x70) >> 4;
if ((middleByte & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
for (let shift = 3; shift <= 31; shift += 7) {
let b = this.buf[this.pos++];
highBits |= (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
}
throw new Error('invalid varint');
}
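// Worked example (assuming a reader positioned at these bytes): [0xAC, 0x02]
// decodes to lowBits = 300, highBits = 0, since 0xAC contributes 0x2C = 44
// and 0x02 contributes 2 << 7 = 256; 44 + 256 = 300.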
/**
* Write a 64 bit varint, given as two JS numbers, to the given bytes array.
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/8a71927d74a4ce34efe2d8769fda198f52d20d12/js/experimental/runtime/kernel/writer.js#L344
*/
export function varint64write(lo: number, hi: number, bytes: number[]): void {
for (let i = 0; i < 28; i = i + 7) {
const shift = lo >>> i;
const hasNext = !((shift >>> 7) == 0 && hi == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext) {
return;
}
}
const splitBits = ((lo >>> 28) & 0x0F) | ((hi & 0x07) << 4);
const hasMoreBits = !((hi >> 3) == 0);
bytes.push(
(hasMoreBits ? splitBits | 0x80 : splitBits) & 0xFF);
if (!hasMoreBits) {
return;
}
for (let i = 3; i < 31; i = i + 7) {
const shift = hi >>> i;
const hasNext = !((shift >>> 7) == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext) {
return;
}
}
bytes.push((hi >>> 31) & 0x01);
}
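// Worked examples: varint64write(300, 0, out) pushes [0xAC, 0x02] (the
// inverse of the read above), and varint64write(0, 1, out), which encodes
// 2^32, pushes [0x80, 0x80, 0x80, 0x80, 0x10].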
// constants for binary math
const TWO_PWR_32_DBL = (1 << 16) * (1 << 16);
/**
* Parse decimal string of 64 bit integer value as two JS numbers.
*
* Returns tuple:
* [0]: minus sign?
* [1]: low bits
* [2]: high bits
*
* Copyright 2008 Google Inc.
*/
export function int64fromString(dec: string): [boolean, number, number] {
// Check for minus sign.
let minus = dec[0] == '-';
if (minus)
dec = dec.slice(1);
// Work 6 decimal digits at a time, acting like we're converting base 1e6
// digits to binary. This is safe to do with floating point math because
// Number.isSafeInteger(ALL_32_BITS * 1e6) == true.
const base = 1e6;
let lowBits = 0;
let highBits = 0;
function add1e6digit(begin: number, end?: number) {
// Note: Number('') is 0.
const digit1e6 = Number(dec.slice(begin, end));
highBits *= base;
lowBits = lowBits * base + digit1e6;
    // Carry bits from lowBits to highBits.
if (lowBits >= TWO_PWR_32_DBL) {
highBits = highBits + ((lowBits / TWO_PWR_32_DBL) | 0);
lowBits = lowBits % TWO_PWR_32_DBL;
}
}
add1e6digit(-24, -18);
add1e6digit(-18, -12);
add1e6digit(-12, -6);
add1e6digit(-6);
return [minus, lowBits, highBits];
}
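// Worked examples: int64fromString('4294967296') returns [false, 0, 1]
// (2^32 has zero low bits and a high word of 1), and int64fromString('-1')
// returns [true, 1, 0]; the sign is carried separately from the magnitude.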
/**
* Format 64 bit integer value (as two JS numbers) to decimal string.
*
* Copyright 2008 Google Inc.
*/
export function int64toString(bitsLow: number, bitsHigh: number): string {
// Skip the expensive conversion if the number is small enough to use the
// built-in conversions.
if (bitsHigh <= 0x1FFFFF) {
return '' + (TWO_PWR_32_DBL * bitsHigh + bitsLow);
}
// What this code is doing is essentially converting the input number from
// base-2 to base-1e7, which allows us to represent the 64-bit range with
// only 3 (very large) digits. Those digits are then trivial to convert to
// a base-10 string.
// The magic numbers used here are -
// 2^24 = 16777216 = (1,6777216) in base-1e7.
// 2^48 = 281474976710656 = (2,8147497,6710656) in base-1e7.
// Split 32:32 representation into 16:24:24 representation so our
// intermediate digits don't overflow.
let low = bitsLow & 0xFFFFFF;
let mid = (((bitsLow >>> 24) | (bitsHigh << 8)) >>> 0) & 0xFFFFFF;
let high = (bitsHigh >> 16) & 0xFFFF;
// Assemble our three base-1e7 digits, ignoring carries. The maximum
// value in a digit at this step is representable as a 48-bit integer, which
// can be stored in a 64-bit floating point number.
let digitA = low + (mid * 6777216) + (high * 6710656);
let digitB = mid + (high * 8147497);
let digitC = (high * 2);
// Apply carries from A to B and from B to C.
let base = 10000000;
if (digitA >= base) {
digitB += Math.floor(digitA / base);
digitA %= base;
}
if (digitB >= base) {
digitC += Math.floor(digitB / base);
digitB %= base;
}
// Convert base-1e7 digits to base-10, with optional leading zeroes.
function decimalFrom1e7(digit1e7: number, needLeadingZeros: number) {
let partial = digit1e7 ? String(digit1e7) : '';
if (needLeadingZeros) {
return '0000000'.slice(partial.length) + partial;
}
return partial;
}
return decimalFrom1e7(digitC, /*needLeadingZeros=*/ 0) +
decimalFrom1e7(digitB, /*needLeadingZeros=*/ digitC) +
// If the final 1e7 digit didn't need leading zeros, we would have
// returned via the trivial code path at the top.
decimalFrom1e7(digitA, /*needLeadingZeros=*/ 1);
}
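// Worked examples: int64toString(0, 1) stays on the fast path and returns
// '4294967296', while int64toString(0xFFFFFFFF, 0x7FFFFFFF) takes the
// base-1e7 path and returns '9223372036854775807' (2^63 - 1).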
/**
* Write a 32 bit varint, signed or unsigned. Same as `varint64write(0, value, bytes)`
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/1b18833f4f2a2f681f4e4a25cdf3b0a43115ec26/js/binary/encoder.js#L144
*/
export function varint32write(value: number, bytes: number[]): void
|
{
if (value >= 0) {
// write value as varint 32
while (value > 0x7f) {
bytes.push((value & 0x7f) | 0x80);
value = value >>> 7;
}
bytes.push(value);
} else {
for (let i = 0; i < 9; i++) {
bytes.push(value & 127 | 128);
value = value >> 7;
}
bytes.push(1);
}
}
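// Worked examples: varint32write(300, out) pushes [0xAC, 0x02]; a negative
// value such as varint32write(-1, out) is sign-extended to 64 bits and
// pushes ten bytes: nine 0xFF bytes followed by 0x01.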
|
identifier_body
|
|
goog-varint.ts
|
.
//
// Code generated by the Protocol Buffer compiler is owned by the owner
// of the input file used when generating it. This code is not
// standalone and requires a support library to be linked with it. This
// support library is itself covered by the above license.
/**
* Read a 64 bit varint as two JS numbers.
*
* Returns tuple:
* [0]: low bits
 * [1]: high bits
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/8a71927d74a4ce34efe2d8769fda198f52d20d12/js/experimental/runtime/kernel/buffer_decoder.js#L175
*/
export function varint64read(this: ReaderLike): [number, number] {
let lowBits = 0;
let highBits = 0;
for (let shift = 0; shift < 28; shift += 7) {
let b = this.buf[this.pos++];
lowBits |= (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
}
let middleByte = this.buf[this.pos++];
// last four bits of the first 32 bit number
lowBits |= (middleByte & 0x0F) << 28;
// 3 upper bits are part of the next 32 bit number
highBits = (middleByte & 0x70) >> 4;
if ((middleByte & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
for (let shift = 3; shift <= 31; shift += 7) {
let b = this.buf[this.pos++];
highBits |= (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
}
throw new Error('invalid varint');
}
/**
* Write a 64 bit varint, given as two JS numbers, to the given bytes array.
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/8a71927d74a4ce34efe2d8769fda198f52d20d12/js/experimental/runtime/kernel/writer.js#L344
*/
export function varint64write(lo: number, hi: number, bytes: number[]): void {
for (let i = 0; i < 28; i = i + 7) {
const shift = lo >>> i;
const hasNext = !((shift >>> 7) == 0 && hi == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext) {
return;
}
}
const splitBits = ((lo >>> 28) & 0x0F) | ((hi & 0x07) << 4);
const hasMoreBits = !((hi >> 3) == 0);
bytes.push(
(hasMoreBits ? splitBits | 0x80 : splitBits) & 0xFF);
if (!hasMoreBits) {
return;
}
for (let i = 3; i < 31; i = i + 7) {
const shift = hi >>> i;
const hasNext = !((shift >>> 7) == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext) {
return;
}
}
|
// constants for binary math
const TWO_PWR_32_DBL = (1 << 16) * (1 << 16);
/**
* Parse decimal string of 64 bit integer value as two JS numbers.
*
* Returns tuple:
* [0]: minus sign?
* [1]: low bits
* [2]: high bits
*
* Copyright 2008 Google Inc.
*/
export function int64fromString(dec: string): [boolean, number, number] {
// Check for minus sign.
let minus = dec[0] == '-';
if (minus)
dec = dec.slice(1);
// Work 6 decimal digits at a time, acting like we're converting base 1e6
// digits to binary. This is safe to do with floating point math because
// Number.isSafeInteger(ALL_32_BITS * 1e6) == true.
const base = 1e6;
let lowBits = 0;
let highBits = 0;
function add1e6digit(begin: number, end?: number) {
// Note: Number('') is 0.
const digit1e6 = Number(dec.slice(begin, end));
highBits *= base;
lowBits = lowBits * base + digit1e6;
    // Carry bits from lowBits to highBits.
if (lowBits >= TWO_PWR_32_DBL) {
highBits = highBits + ((lowBits / TWO_PWR_32_DBL) | 0);
lowBits = lowBits % TWO_PWR_32_DBL;
}
}
add1e6digit(-24, -18);
add1e6digit(-18, -12);
add1e6digit(-12, -6);
add1e6digit(-6);
return [minus, lowBits, highBits];
}
/**
* Format 64 bit integer value (as two JS numbers) to decimal string.
*
* Copyright 2008 Google Inc.
*/
export function int64toString(bitsLow: number, bitsHigh: number): string {
// Skip the expensive conversion if the number is small enough to use the
// built-in conversions.
if (bitsHigh <= 0x1FFFFF) {
return '' + (TWO_PWR_32_DBL * bitsHigh + bitsLow);
}
// What this code is doing is essentially converting the input number from
// base-2 to base-1e7, which allows us to represent the 64-bit range with
// only 3 (very large) digits. Those digits are then trivial to convert to
// a base-10 string.
// The magic numbers used here are -
// 2^24 = 16777216 = (1,6777216) in base-1e7.
// 2^48 = 281474976710656 = (2,8147497,6710656) in base-1e7.
// Split 32:32 representation into 16:24:24 representation so our
// intermediate digits don't overflow.
let low = bitsLow & 0xFFFFFF;
let mid = (((bitsLow >>> 24) | (bitsHigh << 8)) >>> 0) & 0xFFFFFF;
let high = (bitsHigh >> 16) & 0xFFFF;
// Assemble our three base-1e7 digits, ignoring carries. The maximum
// value in a digit at this step is representable as a 48-bit integer, which
// can be stored in a 64-bit floating point number.
let digitA = low + (mid * 6777216) + (high * 6710656);
let digitB = mid + (high * 8147497);
let digitC = (high * 2);
// Apply carries from A to B and from B to C.
let base = 10000000;
if (digitA >= base) {
digitB += Math.floor(digitA / base);
digitA %= base;
}
if (digitB >= base) {
digitC += Math.floor(digitB / base);
digitB %= base;
}
// Convert base-1e7 digits to base-10, with optional leading zeroes.
function decimalFrom1e7(digit1e7: number, needLeadingZeros: number) {
let partial = digit1e7 ? String(digit1e7) : '';
if (needLeadingZeros) {
return '0000000'.slice(partial.length) + partial;
}
return partial;
}
return decimalFrom1e7(digitC, /*needLeadingZeros=*/ 0) +
decimalFrom1e7(digitB, /*needLeadingZeros=*/ digitC) +
// If the final 1e7 digit didn't need leading zeros, we would have
// returned via the trivial code path at the top.
|
bytes.push((hi >>> 31) & 0x01);
}
|
random_line_split
|
goog-varint.ts
|
.
//
// Code generated by the Protocol Buffer compiler is owned by the owner
// of the input file used when generating it. This code is not
// standalone and requires a support library to be linked with it. This
// support library is itself covered by the above license.
/**
* Read a 64 bit varint as two JS numbers.
*
* Returns tuple:
* [0]: low bits
 * [1]: high bits
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/8a71927d74a4ce34efe2d8769fda198f52d20d12/js/experimental/runtime/kernel/buffer_decoder.js#L175
*/
export function varint64read(this: ReaderLike): [number, number] {
let lowBits = 0;
let highBits = 0;
for (let shift = 0; shift < 28; shift += 7) {
let b = this.buf[this.pos++];
lowBits |= (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
}
let middleByte = this.buf[this.pos++];
// last four bits of the first 32 bit number
lowBits |= (middleByte & 0x0F) << 28;
// 3 upper bits are part of the next 32 bit number
highBits = (middleByte & 0x70) >> 4;
if ((middleByte & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
for (let shift = 3; shift <= 31; shift += 7) {
let b = this.buf[this.pos++];
highBits |= (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
}
throw new Error('invalid varint');
}
/**
* Write a 64 bit varint, given as two JS numbers, to the given bytes array.
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/8a71927d74a4ce34efe2d8769fda198f52d20d12/js/experimental/runtime/kernel/writer.js#L344
*/
export function
|
(lo: number, hi: number, bytes: number[]): void {
for (let i = 0; i < 28; i = i + 7) {
const shift = lo >>> i;
const hasNext = !((shift >>> 7) == 0 && hi == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext) {
return;
}
}
const splitBits = ((lo >>> 28) & 0x0F) | ((hi & 0x07) << 4);
const hasMoreBits = !((hi >> 3) == 0);
bytes.push(
(hasMoreBits ? splitBits | 0x80 : splitBits) & 0xFF);
if (!hasMoreBits) {
return;
}
for (let i = 3; i < 31; i = i + 7) {
const shift = hi >>> i;
const hasNext = !((shift >>> 7) == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext) {
return;
}
}
bytes.push((hi >>> 31) & 0x01);
}
// constants for binary math
const TWO_PWR_32_DBL = (1 << 16) * (1 << 16);
/**
* Parse decimal string of 64 bit integer value as two JS numbers.
*
* Returns tuple:
* [0]: minus sign?
* [1]: low bits
* [2]: high bits
*
* Copyright 2008 Google Inc.
*/
export function int64fromString(dec: string): [boolean, number, number] {
// Check for minus sign.
let minus = dec[0] == '-';
if (minus)
dec = dec.slice(1);
// Work 6 decimal digits at a time, acting like we're converting base 1e6
// digits to binary. This is safe to do with floating point math because
// Number.isSafeInteger(ALL_32_BITS * 1e6) == true.
const base = 1e6;
let lowBits = 0;
let highBits = 0;
function add1e6digit(begin: number, end?: number) {
// Note: Number('') is 0.
const digit1e6 = Number(dec.slice(begin, end));
highBits *= base;
lowBits = lowBits * base + digit1e6;
    // Carry bits from lowBits to highBits.
if (lowBits >= TWO_PWR_32_DBL) {
highBits = highBits + ((lowBits / TWO_PWR_32_DBL) | 0);
lowBits = lowBits % TWO_PWR_32_DBL;
}
}
add1e6digit(-24, -18);
add1e6digit(-18, -12);
add1e6digit(-12, -6);
add1e6digit(-6);
return [minus, lowBits, highBits];
}
/**
* Format 64 bit integer value (as two JS numbers) to decimal string.
*
* Copyright 2008 Google Inc.
*/
export function int64toString(bitsLow: number, bitsHigh: number): string {
// Skip the expensive conversion if the number is small enough to use the
// built-in conversions.
if (bitsHigh <= 0x1FFFFF) {
return '' + (TWO_PWR_32_DBL * bitsHigh + bitsLow);
}
// What this code is doing is essentially converting the input number from
// base-2 to base-1e7, which allows us to represent the 64-bit range with
// only 3 (very large) digits. Those digits are then trivial to convert to
// a base-10 string.
// The magic numbers used here are -
// 2^24 = 16777216 = (1,6777216) in base-1e7.
// 2^48 = 281474976710656 = (2,8147497,6710656) in base-1e7.
// Split 32:32 representation into 16:24:24 representation so our
// intermediate digits don't overflow.
let low = bitsLow & 0xFFFFFF;
let mid = (((bitsLow >>> 24) | (bitsHigh << 8)) >>> 0) & 0xFFFFFF;
let high = (bitsHigh >> 16) & 0xFFFF;
// Assemble our three base-1e7 digits, ignoring carries. The maximum
// value in a digit at this step is representable as a 48-bit integer, which
// can be stored in a 64-bit floating point number.
let digitA = low + (mid * 6777216) + (high * 6710656);
let digitB = mid + (high * 8147497);
let digitC = (high * 2);
// Apply carries from A to B and from B to C.
let base = 10000000;
if (digitA >= base) {
digitB += Math.floor(digitA / base);
digitA %= base;
}
if (digitB >= base) {
digitC += Math.floor(digitB / base);
digitB %= base;
}
// Convert base-1e7 digits to base-10, with optional leading zeroes.
function decimalFrom1e7(digit1e7: number, needLeadingZeros: number) {
let partial = digit1e7 ? String(digit1e7) : '';
if (needLeadingZeros) {
return '0000000'.slice(partial.length) + partial;
}
return partial;
}
return decimalFrom1e7(digitC, /*needLeadingZeros=*/ 0) +
decimalFrom1e7(digitB, /*needLeadingZeros=*/ digitC) +
// If the final 1e7 digit didn't need leading zeros, we would have
// returned via the trivial code path at the top
|
varint64write
|
identifier_name
|
goog-varint.ts
|
//
// Code generated by the Protocol Buffer compiler is owned by the owner
// of the input file used when generating it. This code is not
// standalone and requires a support library to be linked with it. This
// support library is itself covered by the above license.
/**
* Read a 64 bit varint as two JS numbers.
*
* Returns tuple:
* [0]: low bits
 * [1]: high bits
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/8a71927d74a4ce34efe2d8769fda198f52d20d12/js/experimental/runtime/kernel/buffer_decoder.js#L175
*/
export function varint64read(this: ReaderLike): [number, number] {
let lowBits = 0;
let highBits = 0;
for (let shift = 0; shift < 28; shift += 7) {
let b = this.buf[this.pos++];
lowBits |= (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
}
let middleByte = this.buf[this.pos++];
// last four bits of the first 32 bit number
lowBits |= (middleByte & 0x0F) << 28;
// 3 upper bits are part of the next 32 bit number
highBits = (middleByte & 0x70) >> 4;
if ((middleByte & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
for (let shift = 3; shift <= 31; shift += 7) {
let b = this.buf[this.pos++];
highBits |= (b & 0x7F) << shift;
if ((b & 0x80) == 0) {
this.assertBounds();
return [lowBits, highBits];
}
}
throw new Error('invalid varint');
}
/**
* Write a 64 bit varint, given as two JS numbers, to the given bytes array.
*
* Copyright 2008 Google Inc. All rights reserved.
*
* See https://github.com/protocolbuffers/protobuf/blob/8a71927d74a4ce34efe2d8769fda198f52d20d12/js/experimental/runtime/kernel/writer.js#L344
*/
export function varint64write(lo: number, hi: number, bytes: number[]): void {
for (let i = 0; i < 28; i = i + 7) {
const shift = lo >>> i;
const hasNext = !((shift >>> 7) == 0 && hi == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext) {
return;
}
}
const splitBits = ((lo >>> 28) & 0x0F) | ((hi & 0x07) << 4);
const hasMoreBits = !((hi >> 3) == 0);
bytes.push(
(hasMoreBits ? splitBits | 0x80 : splitBits) & 0xFF);
if (!hasMoreBits) {
return;
}
for (let i = 3; i < 31; i = i + 7) {
const shift = hi >>> i;
const hasNext = !((shift >>> 7) == 0);
const byte = (hasNext ? shift | 0x80 : shift) & 0xFF;
bytes.push(byte);
if (!hasNext)
|
}
bytes.push((hi >>> 31) & 0x01);
}
// constants for binary math
const TWO_PWR_32_DBL = (1 << 16) * (1 << 16);
/**
* Parse decimal string of 64 bit integer value as two JS numbers.
*
* Returns tuple:
* [0]: minus sign?
* [1]: low bits
* [2]: high bits
*
* Copyright 2008 Google Inc.
*/
export function int64fromString(dec: string): [boolean, number, number] {
// Check for minus sign.
let minus = dec[0] == '-';
if (minus)
dec = dec.slice(1);
// Work 6 decimal digits at a time, acting like we're converting base 1e6
// digits to binary. This is safe to do with floating point math because
// Number.isSafeInteger(ALL_32_BITS * 1e6) == true.
const base = 1e6;
let lowBits = 0;
let highBits = 0;
function add1e6digit(begin: number, end?: number) {
// Note: Number('') is 0.
const digit1e6 = Number(dec.slice(begin, end));
highBits *= base;
lowBits = lowBits * base + digit1e6;
    // Carry bits from lowBits to highBits.
if (lowBits >= TWO_PWR_32_DBL) {
highBits = highBits + ((lowBits / TWO_PWR_32_DBL) | 0);
lowBits = lowBits % TWO_PWR_32_DBL;
}
}
add1e6digit(-24, -18);
add1e6digit(-18, -12);
add1e6digit(-12, -6);
add1e6digit(-6);
return [minus, lowBits, highBits];
}
/**
* Format 64 bit integer value (as two JS numbers) to decimal string.
*
* Copyright 2008 Google Inc.
*/
export function int64toString(bitsLow: number, bitsHigh: number): string {
// Skip the expensive conversion if the number is small enough to use the
// built-in conversions.
if (bitsHigh <= 0x1FFFFF) {
return '' + (TWO_PWR_32_DBL * bitsHigh + bitsLow);
}
// What this code is doing is essentially converting the input number from
// base-2 to base-1e7, which allows us to represent the 64-bit range with
// only 3 (very large) digits. Those digits are then trivial to convert to
// a base-10 string.
// The magic numbers used here are -
// 2^24 = 16777216 = (1,6777216) in base-1e7.
// 2^48 = 281474976710656 = (2,8147497,6710656) in base-1e7.
// Split 32:32 representation into 16:24:24 representation so our
// intermediate digits don't overflow.
let low = bitsLow & 0xFFFFFF;
let mid = (((bitsLow >>> 24) | (bitsHigh << 8)) >>> 0) & 0xFFFFFF;
let high = (bitsHigh >> 16) & 0xFFFF;
// Assemble our three base-1e7 digits, ignoring carries. The maximum
// value in a digit at this step is representable as a 48-bit integer, which
// can be stored in a 64-bit floating point number.
let digitA = low + (mid * 6777216) + (high * 6710656);
let digitB = mid + (high * 8147497);
let digitC = (high * 2);
// Apply carries from A to B and from B to C.
let base = 10000000;
if (digitA >= base) {
digitB += Math.floor(digitA / base);
digitA %= base;
}
if (digitB >= base) {
digitC += Math.floor(digitB / base);
digitB %= base;
}
// Convert base-1e7 digits to base-10, with optional leading zeroes.
function decimalFrom1e7(digit1e7: number, needLeadingZeros: number) {
let partial = digit1e7 ? String(digit1e7) : '';
if (needLeadingZeros) {
return '0000000'.slice(partial.length) + partial;
}
return partial;
}
return decimalFrom1e7(digitC, /*needLeadingZeros=*/ 0) +
decimalFrom1e7(digitB, /*needLeadingZeros=*/ digitC) +
// If the final 1e7 digit didn't need leading zeros, we would have
// returned via the trivial code path at the top
|
{
return;
}
|
conditional_block
|
daemon.go
|
func (c *Command) canAccess(r *http.Request, user *userState) accessResult {
if c.AdminOnly && (c.UserOK || c.GuestOK || c.UntrustedOK) {
logger.Panicf("internal error: command cannot have AdminOnly together with any *OK flag")
}
if user != nil && !c.AdminOnly {
		// Authenticated users can do anything not requiring explicit admin.
return accessOK
}
// isUser means we have a UID for the request
isUser := false
pid, uid, socket, err := ucrednetGet(r.RemoteAddr)
if err == nil {
isUser = true
} else if err != errNoID {
logger.Noticef("unexpected error when attempting to get UID: %s", err)
return accessForbidden
}
isUntrusted := (socket == c.d.untrustedSocketPath)
_ = pid
_ = uid
if isUntrusted {
if c.UntrustedOK {
return accessOK
}
return accessUnauthorized
}
// the !AdminOnly check is redundant, but belt-and-suspenders
if r.Method == "GET" && !c.AdminOnly {
// Guest and user access restricted to GET requests
if c.GuestOK {
return accessOK
}
if isUser && c.UserOK {
return accessOK
}
}
// Remaining admin checks rely on identifying peer uid
if !isUser {
return accessUnauthorized
}
if uid == 0 || sys.UserID(uid) == sysGetuid() {
// Superuser and process owner can do anything.
return accessOK
}
if c.AdminOnly {
return accessUnauthorized
}
return accessUnauthorized
}
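// In practice the checks above resolve in this order: authenticated
// non-admin requests pass immediately; requests on the untrusted socket are
// gated solely on UntrustedOK; GET requests may pass via GuestOK or UserOK;
// root or the daemon's own UID passes everything, including AdminOnly;
// anything left over is unauthorized.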
func userFromRequest(state interface{}, r *http.Request) (*userState, error) {
return nil, nil
}
func (c *Command) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st := c.d.state
st.Lock()
user, err := userFromRequest(st, r)
if err != nil {
statusForbidden("forbidden").ServeHTTP(w, r)
return
}
st.Unlock()
// check if we are in degradedMode
if c.d.degradedErr != nil && r.Method != "GET" {
statusInternalError(c.d.degradedErr.Error()).ServeHTTP(w, r)
return
}
switch c.canAccess(r, user) {
case accessOK:
// nothing
case accessUnauthorized:
statusUnauthorized("access denied").ServeHTTP(w, r)
return
case accessForbidden:
statusForbidden("forbidden").ServeHTTP(w, r)
return
}
var rspf ResponseFunc
var rsp = statusMethodNotAllowed("method %q not allowed", r.Method)
switch r.Method {
case "GET":
rspf = c.GET
case "PUT":
rspf = c.PUT
case "POST":
rspf = c.POST
case "DELETE":
rspf = c.DELETE
}
if rspf != nil {
rsp = rspf(c, r, user)
}
if rsp, ok := rsp.(*resp); ok {
_, rst := st.Restarting()
switch rst {
case state.RestartSystem:
rsp.transmitMaintenance(errorKindSystemRestart, "system is restarting")
case state.RestartDaemon:
rsp.transmitMaintenance(errorKindDaemonRestart, "daemon is restarting")
case state.RestartSocket:
rsp.transmitMaintenance(errorKindDaemonRestart, "daemon is stopping to wait for socket activation")
}
if rsp.Type != ResponseTypeError {
st.Lock()
count, stamp := st.WarningsSummary()
st.Unlock()
rsp.addWarningsToMeta(count, stamp)
}
}
rsp.ServeHTTP(w, r)
}
type wrappedWriter struct {
w http.ResponseWriter
s int
}
func (w *wrappedWriter) Header() http.Header {
return w.w.Header()
}
func (w *wrappedWriter) Write(bs []byte) (int, error) {
return w.w.Write(bs)
}
func (w *wrappedWriter) WriteHeader(s int) {
w.w.WriteHeader(s)
w.s = s
}
func (w *wrappedWriter) Flush() {
if f, ok := w.w.(http.Flusher); ok {
f.Flush()
}
}
func logit(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ww := &wrappedWriter{w: w}
t0 := time.Now()
handler.ServeHTTP(ww, r)
		t := time.Since(t0)
if !strings.Contains(r.URL.String(), "/v1/changes/") {
if strings.HasSuffix(r.RemoteAddr, ";")
|
else {
logger.Noticef("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
}
}
})
}
// Init sets up the Daemon's internal workings.
// Don't call more than once.
func (d *Daemon) Init() error {
listenerMap := make(map[string]net.Listener)
if listener, err := getListener(d.normalSocketPath, listenerMap); err == nil {
d.generalListener = &ucrednetListener{Listener: listener}
} else {
return fmt.Errorf("when trying to listen on %s: %v", d.normalSocketPath, err)
}
if listener, err := getListener(d.untrustedSocketPath, listenerMap); err == nil {
// This listener may also be nil if that socket wasn't among
// the listeners, so check it before using it.
d.untrustedListener = &ucrednetListener{Listener: listener}
} else {
logger.Debugf("cannot get listener for %q: %v", d.untrustedSocketPath, err)
}
d.addRoutes()
logger.Noticef("Started daemon.")
return nil
}
// SetDegradedMode puts the daemon into a degraded mode in which it returns
// the error given in the "err" argument for commands that are not marked
// as readonlyOK.
//
// This is useful to report errors to the client when the daemon
// cannot work because e.g. a sanity check failed or the system is out
// of disk space.
//
// When the system is fine again, calling "SetDegradedMode(nil)" is enough
// to put the daemon back into full operation.
func (d *Daemon) SetDegradedMode(err error) {
d.degradedErr = err
}
func (d *Daemon) addRoutes() {
d.router = mux.NewRouter()
for _, c := range api {
c.d = d
if c.PathPrefix == "" {
d.router.Handle(c.Path, c).Name(c.Path)
} else {
d.router.PathPrefix(c.PathPrefix).Handler(c).Name(c.PathPrefix)
}
}
// also maybe add a /favicon.ico handler...
d.router.NotFoundHandler = statusNotFound("invalid API endpoint requested")
}
type connTracker struct {
mu sync.Mutex
conns map[net.Conn]struct{}
}
func (ct *connTracker) CanStandby() bool {
ct.mu.Lock()
defer ct.mu.Unlock()
return len(ct.conns) == 0
}
func (ct *connTracker) trackConn(conn net.Conn, state http.ConnState) {
ct.mu.Lock()
defer ct.mu.Unlock()
	// we ignore hijacked connections; if we do things with websockets
// we'll need custom shutdown handling for them
if state == http.StateNew || state == http.StateActive {
ct.conns[conn] = struct{}{}
} else {
delete(ct.conns, conn)
}
}
func (d *Daemon) CanStandby() bool {
return systemd.SocketAvailable()
}
func (d *Daemon) initStandbyHandling() {
d.standbyOpinions = standby.New(d.state)
d.standbyOpinions.AddOpinion(d)
d.standbyOpinions.AddOpinion(d.connTracker)
d.standbyOpinions.AddOpinion(d.overlord)
d.standbyOpinions.Start()
}
func (d *Daemon) Start() {
if d.rebootIsMissing {
// we need to schedule and wait for a system restart
d.tomb.Kill(nil)
// avoid systemd killing us again while we wait
systemdSdNotify("READY=1")
return
}
if d.overlord == nil {
panic("internal error: no Overlord")
}
d.StartTime = time.Now()
d.connTracker = &connTracker{conns: make(map[net.Conn]struct{})}
d.serve = &http.Server{
Handler: logit(d.router),
ConnState: d.connTracker.trackConn,
}
d.initStandbyHandling()
d.overlord.Loop()
d.tomb.Go(func() error {
if d.untrustedListener != nil {
d.tomb.Go(func() error {
if err := d.serve.Serve(d.untrustedListener); err != http.ErrServerClosed && d.tomb.Err() == tomb.ErrStillAlive {
return err
}
return nil
})
}
if err := d.serve.Serve(d.generalListener); err != http.ErrServer
|
{
logger.Debugf("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
logger.Noticef("%s %s %s %d", r.Method, r.URL, t, ww.s)
}
|
conditional_block
|
daemon.go
|
OK {
return accessOK
}
return accessUnauthorized
}
// the !AdminOnly check is redundant, but belt-and-suspenders
if r.Method == "GET" && !c.AdminOnly {
// Guest and user access restricted to GET requests
if c.GuestOK {
return accessOK
}
if isUser && c.UserOK {
return accessOK
}
}
// Remaining admin checks rely on identifying peer uid
if !isUser {
return accessUnauthorized
}
if uid == 0 || sys.UserID(uid) == sysGetuid() {
// Superuser and process owner can do anything.
return accessOK
}
if c.AdminOnly {
return accessUnauthorized
}
return accessUnauthorized
}
func userFromRequest(state interface{}, r *http.Request) (*userState, error) {
return nil, nil
}
func (c *Command) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st := c.d.state
st.Lock()
user, err := userFromRequest(st, r)
if err != nil {
statusForbidden("forbidden").ServeHTTP(w, r)
return
}
st.Unlock()
// check if we are in degradedMode
if c.d.degradedErr != nil && r.Method != "GET" {
statusInternalError(c.d.degradedErr.Error()).ServeHTTP(w, r)
return
}
switch c.canAccess(r, user) {
case accessOK:
// nothing
case accessUnauthorized:
statusUnauthorized("access denied").ServeHTTP(w, r)
return
case accessForbidden:
statusForbidden("forbidden").ServeHTTP(w, r)
return
}
var rspf ResponseFunc
var rsp = statusMethodNotAllowed("method %q not allowed", r.Method)
switch r.Method {
case "GET":
rspf = c.GET
case "PUT":
rspf = c.PUT
case "POST":
rspf = c.POST
case "DELETE":
rspf = c.DELETE
}
if rspf != nil {
rsp = rspf(c, r, user)
}
if rsp, ok := rsp.(*resp); ok {
_, rst := st.Restarting()
switch rst {
case state.RestartSystem:
rsp.transmitMaintenance(errorKindSystemRestart, "system is restarting")
case state.RestartDaemon:
rsp.transmitMaintenance(errorKindDaemonRestart, "daemon is restarting")
case state.RestartSocket:
rsp.transmitMaintenance(errorKindDaemonRestart, "daemon is stopping to wait for socket activation")
}
if rsp.Type != ResponseTypeError {
st.Lock()
count, stamp := st.WarningsSummary()
st.Unlock()
rsp.addWarningsToMeta(count, stamp)
}
}
rsp.ServeHTTP(w, r)
}
type wrappedWriter struct {
w http.ResponseWriter
s int
}
func (w *wrappedWriter) Header() http.Header {
return w.w.Header()
}
func (w *wrappedWriter) Write(bs []byte) (int, error) {
return w.w.Write(bs)
}
func (w *wrappedWriter) WriteHeader(s int) {
w.w.WriteHeader(s)
w.s = s
}
func (w *wrappedWriter) Flush() {
if f, ok := w.w.(http.Flusher); ok {
f.Flush()
}
}
func logit(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ww := &wrappedWriter{w: w}
t0 := time.Now()
handler.ServeHTTP(ww, r)
		t := time.Since(t0)
if !strings.Contains(r.URL.String(), "/v1/changes/") {
if strings.HasSuffix(r.RemoteAddr, ";") {
logger.Debugf("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
logger.Noticef("%s %s %s %d", r.Method, r.URL, t, ww.s)
} else {
logger.Noticef("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
}
}
})
}
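// Minimal wiring sketch (mirrors what Init/Start below actually do):
//
//	srv := &http.Server{Handler: logit(d.router)}
//
// Every request outside /v1/changes/ then yields one log line carrying the
// remote address (when present), method, URL, duration, and the status code
// captured by wrappedWriter.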
// Init sets up the Daemon's internal workings.
// Don't call more than once.
func (d *Daemon) Init() error {
listenerMap := make(map[string]net.Listener)
if listener, err := getListener(d.normalSocketPath, listenerMap); err == nil {
d.generalListener = &ucrednetListener{Listener: listener}
} else {
return fmt.Errorf("when trying to listen on %s: %v", d.normalSocketPath, err)
}
if listener, err := getListener(d.untrustedSocketPath, listenerMap); err == nil {
// This listener may also be nil if that socket wasn't among
// the listeners, so check it before using it.
d.untrustedListener = &ucrednetListener{Listener: listener}
} else {
logger.Debugf("cannot get listener for %q: %v", d.untrustedSocketPath, err)
}
d.addRoutes()
logger.Noticef("Started daemon.")
return nil
}
// SetDegradedMode puts the daemon into a degraded mode in which it returns
// the error given in the "err" argument for commands that are not marked
// as readonlyOK.
//
// This is useful to report errors to the client when the daemon
// cannot work because e.g. a sanity check failed or the system is out
// of disk space.
//
// When the system is fine again, calling "SetDegradedMode(nil)" is enough
// to put the daemon back into full operation.
func (d *Daemon) SetDegradedMode(err error) {
d.degradedErr = err
}
func (d *Daemon) addRoutes() {
d.router = mux.NewRouter()
for _, c := range api {
c.d = d
if c.PathPrefix == "" {
d.router.Handle(c.Path, c).Name(c.Path)
} else {
d.router.PathPrefix(c.PathPrefix).Handler(c).Name(c.PathPrefix)
}
}
// also maybe add a /favicon.ico handler...
d.router.NotFoundHandler = statusNotFound("invalid API endpoint requested")
}
type connTracker struct {
mu sync.Mutex
conns map[net.Conn]struct{}
}
func (ct *connTracker) CanStandby() bool {
ct.mu.Lock()
defer ct.mu.Unlock()
return len(ct.conns) == 0
}
func (ct *connTracker) trackConn(conn net.Conn, state http.ConnState) {
ct.mu.Lock()
defer ct.mu.Unlock()
	// we ignore hijacked connections; if we do things with websockets
// we'll need custom shutdown handling for them
if state == http.StateNew || state == http.StateActive {
ct.conns[conn] = struct{}{}
} else {
delete(ct.conns, conn)
}
}
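// Example: a connection in http.StateNew or http.StateActive keeps
// CanStandby false; once it transitions to StateIdle, StateHijacked, or
// StateClosed it is dropped from the map and no longer blocks standby.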
func (d *Daemon) CanStandby() bool {
return systemd.SocketAvailable()
}
func (d *Daemon) initStandbyHandling() {
d.standbyOpinions = standby.New(d.state)
d.standbyOpinions.AddOpinion(d)
d.standbyOpinions.AddOpinion(d.connTracker)
d.standbyOpinions.AddOpinion(d.overlord)
d.standbyOpinions.Start()
}
func (d *Daemon) Start() {
if d.rebootIsMissing {
// we need to schedule and wait for a system restart
d.tomb.Kill(nil)
// avoid systemd killing us again while we wait
systemdSdNotify("READY=1")
return
}
if d.overlord == nil {
panic("internal error: no Overlord")
}
d.StartTime = time.Now()
d.connTracker = &connTracker{conns: make(map[net.Conn]struct{})}
d.serve = &http.Server{
Handler: logit(d.router),
ConnState: d.connTracker.trackConn,
}
d.initStandbyHandling()
d.overlord.Loop()
d.tomb.Go(func() error {
if d.untrustedListener != nil {
d.tomb.Go(func() error {
if err := d.serve.Serve(d.untrustedListener); err != http.ErrServerClosed && d.tomb.Err() == tomb.ErrStillAlive {
return err
}
return nil
})
}
if err := d.serve.Serve(d.generalListener); err != http.ErrServerClosed && d.tomb.Err() == tomb.ErrStillAlive {
return err
}
return nil
})
// notify systemd that we are ready
systemdSdNotify("READY=1")
}
// HandleRestart implements overlord.RestartBehavior.
func (d *Daemon) HandleRestart(t state.RestartType)
|
{
// die when asked to restart (systemd should get us back up!) etc
switch t {
case state.RestartDaemon:
case state.RestartSystem:
// try to schedule a fallback slow reboot already here
// in case we get stuck shutting down
if err := reboot(rebootWaitTimeout); err != nil {
logger.Noticef("%s", err)
}
d.mu.Lock()
defer d.mu.Unlock()
// remember we need to restart the system
d.restartSystem = true
case state.RestartSocket:
d.mu.Lock()
defer d.mu.Unlock()
d.restartSocket = true
default:
|
identifier_body
|
|
daemon.go
|
r)
		t := time.Since(t0)
if !strings.Contains(r.URL.String(), "/v1/changes/") {
if strings.HasSuffix(r.RemoteAddr, ";") {
logger.Debugf("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
logger.Noticef("%s %s %s %d", r.Method, r.URL, t, ww.s)
} else {
logger.Noticef("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
}
}
})
}
// Init sets up the Daemon's internal workings.
// Don't call more than once.
func (d *Daemon) Init() error {
listenerMap := make(map[string]net.Listener)
if listener, err := getListener(d.normalSocketPath, listenerMap); err == nil {
d.generalListener = &ucrednetListener{Listener: listener}
} else {
return fmt.Errorf("when trying to listen on %s: %v", d.normalSocketPath, err)
}
if listener, err := getListener(d.untrustedSocketPath, listenerMap); err == nil {
// This listener may also be nil if that socket wasn't among
// the listeners, so check it before using it.
d.untrustedListener = &ucrednetListener{Listener: listener}
} else {
logger.Debugf("cannot get listener for %q: %v", d.untrustedSocketPath, err)
}
d.addRoutes()
logger.Noticef("Started daemon.")
return nil
}
// SetDegradedMode puts the daemon into a degraded mode in which it returns
// the error given in the "err" argument for commands that are not marked
// as readonlyOK.
//
// This is useful to report errors to the client when the daemon
// cannot work because e.g. a sanity check failed or the system is out
// of disk space.
//
// When the system is fine again, calling "SetDegradedMode(nil)" is enough
// to put the daemon back into full operation.
func (d *Daemon) SetDegradedMode(err error) {
d.degradedErr = err
}
func (d *Daemon) addRoutes() {
d.router = mux.NewRouter()
for _, c := range api {
c.d = d
if c.PathPrefix == "" {
d.router.Handle(c.Path, c).Name(c.Path)
} else {
d.router.PathPrefix(c.PathPrefix).Handler(c).Name(c.PathPrefix)
}
}
// also maybe add a /favicon.ico handler...
d.router.NotFoundHandler = statusNotFound("invalid API endpoint requested")
}
type connTracker struct {
mu sync.Mutex
conns map[net.Conn]struct{}
}
func (ct *connTracker) CanStandby() bool {
ct.mu.Lock()
defer ct.mu.Unlock()
return len(ct.conns) == 0
}
func (ct *connTracker) trackConn(conn net.Conn, state http.ConnState) {
ct.mu.Lock()
defer ct.mu.Unlock()
	// we ignore hijacked connections; if we do things with websockets
// we'll need custom shutdown handling for them
if state == http.StateNew || state == http.StateActive {
ct.conns[conn] = struct{}{}
} else {
delete(ct.conns, conn)
}
}
func (d *Daemon) CanStandby() bool {
return systemd.SocketAvailable()
}
func (d *Daemon) initStandbyHandling() {
d.standbyOpinions = standby.New(d.state)
d.standbyOpinions.AddOpinion(d)
d.standbyOpinions.AddOpinion(d.connTracker)
d.standbyOpinions.AddOpinion(d.overlord)
d.standbyOpinions.Start()
}
func (d *Daemon) Start() {
if d.rebootIsMissing {
// we need to schedule and wait for a system restart
d.tomb.Kill(nil)
// avoid systemd killing us again while we wait
systemdSdNotify("READY=1")
return
}
if d.overlord == nil {
panic("internal error: no Overlord")
}
d.StartTime = time.Now()
d.connTracker = &connTracker{conns: make(map[net.Conn]struct{})}
d.serve = &http.Server{
Handler: logit(d.router),
ConnState: d.connTracker.trackConn,
}
d.initStandbyHandling()
d.overlord.Loop()
d.tomb.Go(func() error {
if d.untrustedListener != nil {
d.tomb.Go(func() error {
if err := d.serve.Serve(d.untrustedListener); err != http.ErrServerClosed && d.tomb.Err() == tomb.ErrStillAlive {
return err
}
return nil
})
}
if err := d.serve.Serve(d.generalListener); err != http.ErrServerClosed && d.tomb.Err() == tomb.ErrStillAlive {
return err
}
return nil
})
// notify systemd that we are ready
systemdSdNotify("READY=1")
}
// HandleRestart implements overlord.RestartBehavior.
func (d *Daemon) HandleRestart(t state.RestartType) {
// die when asked to restart (systemd should get us back up!) etc
switch t {
case state.RestartDaemon:
case state.RestartSystem:
// try to schedule a fallback slow reboot already here
// in case we get stuck shutting down
if err := reboot(rebootWaitTimeout); err != nil {
logger.Noticef("%s", err)
}
d.mu.Lock()
defer d.mu.Unlock()
// remember we need to restart the system
d.restartSystem = true
case state.RestartSocket:
d.mu.Lock()
defer d.mu.Unlock()
d.restartSocket = true
default:
logger.Noticef("internal error: restart handler called with unknown restart type: %v", t)
}
d.tomb.Kill(nil)
}
var (
rebootNoticeWait = 3 * time.Second
rebootWaitTimeout = 10 * time.Minute
rebootRetryWaitTimeout = 5 * time.Minute
rebootMaxTentatives = 3
)
var shutdownTimeout = 25 * time.Second
// Stop shuts down the Daemon.
func (d *Daemon) Stop(sigCh chan<- os.Signal) error {
if d.rebootIsMissing {
// we need to schedule/wait for a system restart again
return d.doReboot(sigCh, rebootRetryWaitTimeout)
}
if d.overlord == nil {
return fmt.Errorf("internal error: no Overlord")
}
d.tomb.Kill(nil)
d.mu.Lock()
restartSystem := d.restartSystem
restartSocket := d.restartSocket
d.mu.Unlock()
d.generalListener.Close()
d.standbyOpinions.Stop()
if d.untrustedListener != nil {
d.untrustedListener.Close()
}
if restartSystem {
// give time to polling clients to notice restart
time.Sleep(rebootNoticeWait)
}
// We're using the background context here because the tomb's
// context will likely already have been cancelled when we are
// called.
ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
d.tomb.Kill(d.serve.Shutdown(ctx))
cancel()
if !restartSystem {
// tell systemd that we are stopping
systemdSdNotify("STOPPING=1")
}
if restartSocket {
// At this point we processed all open requests (and
// stopped accepting new requests) - before going into
// socket activated mode we need to check if any of
// those open requests resulted in something that
// prevents us from going into socket activation mode.
//
// If this is the case we do a "normal" snapd restart
// to process the new changes.
if !d.standbyOpinions.CanStandby() {
d.restartSocket = false
}
}
d.overlord.Stop()
err := d.tomb.Wait()
if err != nil {
// do not stop the shutdown even if the tomb errors
// because we already scheduled a slow shutdown and
// exiting here will just restart snapd (via systemd)
// which will lead to confusing results.
if restartSystem {
logger.Noticef("WARNING: cannot stop daemon: %v", err)
} else {
return err
}
}
if restartSystem {
return d.doReboot(sigCh, rebootWaitTimeout)
}
if d.restartSocket {
return ErrRestartSocket
}
return nil
}
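// Shutdown sequencing, roughly: the listeners close first so no new requests
// arrive, Shutdown(ctx) drains in-flight requests within shutdownTimeout,
// and only then does the daemon reboot (restartSystem), return to socket
// activation (restartSocket), or exit normally.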
func (d *Daemon) rebootDelay() (time.Duration, error) {
d.state.Lock()
defer d.state.Unlock()
now := time.Now()
// see whether a reboot had already been scheduled
var rebootAt time.Time
err := d.state.Get("daemon-system-restart-at", &rebootAt)
if err != nil && err != state.ErrNoState {
return 0, err
}
rebootDelay := 1 * time.Minute
if err == nil {
rebootDelay = rebootAt.Sub(now)
} else {
ovr := os.Getenv("SNAPD_REBOOT_DELAY") // for tests
if ovr != "" {
d, err := time.ParseDuration(ovr)
if err == nil {
rebootDelay = d
|
random_line_split
|
||
daemon.go
|
func (c *Command) canAccess(r *http.Request, user *userState) accessResult {
if c.AdminOnly && (c.UserOK || c.GuestOK || c.UntrustedOK) {
logger.Panicf("internal error: command cannot have AdminOnly together with any *OK flag")
}
if user != nil && !c.AdminOnly {
		// Authenticated users can do anything not requiring explicit admin.
return accessOK
}
// isUser means we have a UID for the request
isUser := false
pid, uid, socket, err := ucrednetGet(r.RemoteAddr)
if err == nil {
isUser = true
} else if err != errNoID {
logger.Noticef("unexpected error when attempting to get UID: %s", err)
return accessForbidden
}
isUntrusted := (socket == c.d.untrustedSocketPath)
_ = pid
_ = uid
if isUntrusted {
if c.UntrustedOK {
return accessOK
}
return accessUnauthorized
}
// the !AdminOnly check is redundant, but belt-and-suspenders
if r.Method == "GET" && !c.AdminOnly {
// Guest and user access restricted to GET requests
if c.GuestOK {
return accessOK
}
if isUser && c.UserOK {
return accessOK
}
}
// Remaining admin checks rely on identifying peer uid
if !isUser {
return accessUnauthorized
}
if uid == 0 || sys.UserID(uid) == sysGetuid() {
// Superuser and process owner can do anything.
return accessOK
}
if c.AdminOnly {
return accessUnauthorized
}
return accessUnauthorized
}
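The decision ladder above is easier to audit laid out flat. A minimal Python sketch of the same ordering (field names and the uid/socket inputs are stand-ins mirroring the Go struct, not snapd's actual API; the authenticated-user shortcut is folded into the `user` flag):

OK, UNAUTHORIZED = "ok", "unauthorized"

def can_access(cmd, method, user, uid, socket, untrusted_socket, daemon_uid):
    # AdminOnly must not be combined with any *OK flag.
    if cmd["admin_only"] and (cmd["user_ok"] or cmd["guest_ok"] or cmd["untrusted_ok"]):
        raise RuntimeError("AdminOnly together with *OK flags")
    if user is not None and not cmd["admin_only"]:
        return OK                      # authenticated users skip further checks
    is_user = uid is not None          # ucrednet gave us a peer UID
    if socket == untrusted_socket:
        return OK if cmd["untrusted_ok"] else UNAUTHORIZED
    if method == "GET" and not cmd["admin_only"]:
        if cmd["guest_ok"] or (is_user and cmd["user_ok"]):
            return OK
    if not is_user:
        return UNAUTHORIZED
    if uid == 0 or uid == daemon_uid:
        return OK                      # root and the daemon's own user
    return UNAUTHORIZED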
func userFromRequest(state interface{}, r *http.Request) (*userState, error) {
return nil, nil
}
func (c *Command) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st := c.d.state
st.Lock()
user, err := userFromRequest(st, r)
if err != nil {
statusForbidden("forbidden").ServeHTTP(w, r)
return
}
st.Unlock()
// check if we are in degradedMode
if c.d.degradedErr != nil && r.Method != "GET" {
statusInternalError(c.d.degradedErr.Error()).ServeHTTP(w, r)
return
}
switch c.canAccess(r, user) {
case accessOK:
// nothing
case accessUnauthorized:
statusUnauthorized("access denied").ServeHTTP(w, r)
return
case accessForbidden:
statusForbidden("forbidden").ServeHTTP(w, r)
return
}
var rspf ResponseFunc
var rsp = statusMethodNotAllowed("method %q not allowed", r.Method)
switch r.Method {
case "GET":
rspf = c.GET
case "PUT":
rspf = c.PUT
case "POST":
rspf = c.POST
case "DELETE":
rspf = c.DELETE
}
if rspf != nil {
rsp = rspf(c, r, user)
}
if rsp, ok := rsp.(*resp); ok {
_, rst := st.Restarting()
switch rst {
case state.RestartSystem:
rsp.transmitMaintenance(errorKindSystemRestart, "system is restarting")
case state.RestartDaemon:
rsp.transmitMaintenance(errorKindDaemonRestart, "daemon is restarting")
case state.RestartSocket:
rsp.transmitMaintenance(errorKindDaemonRestart, "daemon is stopping to wait for socket activation")
}
if rsp.Type != ResponseTypeError {
st.Lock()
count, stamp := st.WarningsSummary()
st.Unlock()
rsp.addWarningsToMeta(count, stamp)
}
}
rsp.ServeHTTP(w, r)
}
type wrappedWriter struct {
w http.ResponseWriter
s int
}
func (w *wrappedWriter) Header() http.Header {
return w.w.Header()
}
func (w *wrappedWriter) Write(bs []byte) (int, error) {
return w.w.Write(bs)
}
func (w *wrappedWriter) WriteHeader(s int) {
w.w.WriteHeader(s)
w.s = s
}
func (w *wrappedWriter) Flush() {
if f, ok := w.w.(http.Flusher); ok {
f.Flush()
}
}
func logit(handler http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ww := &wrappedWriter{w: w}
t0 := time.Now()
handler.ServeHTTP(ww, r)
t := time.Since(t0)
if !strings.Contains(r.URL.String(), "/v1/changes/") {
if strings.HasSuffix(r.RemoteAddr, ";") {
logger.Debugf("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
logger.Noticef("%s %s %s %d", r.Method, r.URL, t, ww.s)
} else {
logger.Noticef("%s %s %s %s %d", r.RemoteAddr, r.Method, r.URL, t, ww.s)
}
}
})
}
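For comparison, the same capture-and-log pattern as Python WSGI middleware (a hedged sketch, not snapd code; `app` and `log` are assumed callables, and the status is captured at header time just like wrappedWriter.WriteHeader):

import time

def logit(app, log):
    def wrapped(environ, start_response):
        captured = {}
        def capture(status, headers, exc_info=None):
            captured["status"] = status        # mirrors wrappedWriter.WriteHeader
            return start_response(status, headers, exc_info)
        t0 = time.monotonic()
        body = app(environ, capture)
        log("%s %s %.3fs %s" % (environ["REQUEST_METHOD"],
                                environ.get("PATH_INFO", ""),
                                time.monotonic() - t0,
                                captured.get("status", "?")))
        return body
    return wrapped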
// Init sets up the Daemon's internal workings.
// Don't call more than once.
func (d *Daemon) Init() error {
listenerMap := make(map[string]net.Listener)
if listener, err := getListener(d.normalSocketPath, listenerMap); err == nil {
d.generalListener = &ucrednetListener{Listener: listener}
} else {
return fmt.Errorf("when trying to listen on %s: %v", d.normalSocketPath, err)
}
if listener, err := getListener(d.untrustedSocketPath, listenerMap); err == nil {
// This listener may also be nil if that socket wasn't among
// the listeners, so check it before using it.
d.untrustedListener = &ucrednetListener{Listener: listener}
} else {
logger.Debugf("cannot get listener for %q: %v", d.untrustedSocketPath, err)
}
d.addRoutes()
logger.Noticef("Started daemon.")
return nil
}
// SetDegradedMode puts the daemon into a degraded mode which will return the
// error given in the "err" argument for commands that are not marked
// as readonlyOK.
//
// This is useful to report errors to the client when the daemon
// cannot work because e.g. a sanity check failed or the system is out
// of diskspace.
//
// When the system is fine again, calling "SetDegradedMode(nil)" is enough
// to put the daemon into full operation again.
func (d *Daemon)
|
(err error) {
d.degradedErr = err
}
func (d *Daemon) addRoutes() {
d.router = mux.NewRouter()
for _, c := range api {
c.d = d
if c.PathPrefix == "" {
d.router.Handle(c.Path, c).Name(c.Path)
} else {
d.router.PathPrefix(c.PathPrefix).Handler(c).Name(c.PathPrefix)
}
}
// also maybe add a /favicon.ico handler...
d.router.NotFoundHandler = statusNotFound("invalid API endpoint requested")
}
type connTracker struct {
mu sync.Mutex
conns map[net.Conn]struct{}
}
func (ct *connTracker) CanStandby() bool {
ct.mu.Lock()
defer ct.mu.Unlock()
return len(ct.conns) == 0
}
func (ct *connTracker) trackConn(conn net.Conn, state http.ConnState) {
ct.mu.Lock()
defer ct.mu.Unlock()
// we ignore hijacked connections, if we do things with websockets
// we'll need custom shutdown handling for them
if state == http.StateNew || state == http.StateActive {
ct.conns[conn] = struct{}{}
} else {
delete(ct.conns, conn)
}
}
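A Python analogue of the tracker's invariant, for illustration (names are assumptions, not part of the daemon): standby is possible only while the set of new/active connections is empty.

import threading

class ConnTracker:
    def __init__(self):
        self._lock = threading.Lock()
        self._conns = set()

    def can_standby(self):
        with self._lock:
            return not self._conns

    def track_conn(self, conn, state):
        with self._lock:
            if state in ("new", "active"):
                self._conns.add(conn)
            else:
                # idle/closed/hijacked connections drop out of the set
                self._conns.discard(conn)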
func (d *Daemon) CanStandby() bool {
return systemd.SocketAvailable()
}
func (d *Daemon) initStandbyHandling() {
d.standbyOpinions = standby.New(d.state)
d.standbyOpinions.AddOpinion(d)
d.standbyOpinions.AddOpinion(d.connTracker)
d.standbyOpinions.AddOpinion(d.overlord)
d.standbyOpinions.Start()
}
func (d *Daemon) Start() {
if d.rebootIsMissing {
// we need to schedule and wait for a system restart
d.tomb.Kill(nil)
// avoid systemd killing us again while we wait
systemdSdNotify("READY=1")
return
}
if d.overlord == nil {
panic("internal error: no Overlord")
}
d.StartTime = time.Now()
d.connTracker = &connTracker{conns: make(map[net.Conn]struct{})}
d.serve = &http.Server{
Handler: logit(d.router),
ConnState: d.connTracker.trackConn,
}
d.initStandbyHandling()
d.overlord.Loop()
d.tomb.Go(func() error {
if d.untrustedListener != nil {
d.tomb.Go(func() error {
if err := d.serve.Serve(d.untrustedListener); err != http.ErrServerClosed && d.tomb.Err() == tomb.ErrStillAlive {
return err
}
return nil
})
}
if err := d.serve.Serve(d.generalListener); err != http.ErrServer
|
SetDegradedMode
|
identifier_name
|
base.py
|
=subprocess.PIPE)
output, errors = p.communicate()
if errors:
# Not currently logged in
p = subprocess.Popen(["azure", "login"], stderr=subprocess.PIPE)
output, errors = p.communicate()
if errors:
return "Failed to login: " + errors.decode("utf-8")
return "Logged in to Azure"
def _hostnameResolves(self, hostname):
try:
socket.gethostbyname(hostname)
return True
except socket.error:
return False
def getManagementEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'mgmt.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def getAgentEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'agents.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def createResourceGroup(self):
self.log.debug("Creating Resource Group")
command = "azure group create " + self.config.get('Group', 'name') + " " + self.config.get('Group', 'region')
os.system(command)
def run(self):
raise NotImplementedError("You must implement the run() method in your commands")
def help(self):
raise NotImplementedError("You must implement the help method. In most cases you will simply do 'print(__doc__)'")
def getAgentIPs(self):
# return a list of Agent IPs in this cluster
agentPool = AgentPool(self.config)
nics = agentPool.getNICs()
ips = []
for nic in nics:
try:
ip = nic["ipConfigurations"][0]["privateIPAddress"]
self.log.debug("IP for " + nic["name"] + " is: " + str(ip))
ips.append(ip)
except KeyError:
self.log.warning("NIC doesn't seem to have the information we need")
self.log.debug("Agent IPs: " + str(ips))
return ips
def executeOnAgent(self, cmd, ip):
"""
Execute command on an agent identified by agent_name
"""
sshadd = "ssh-add " + self.config.get("SSH", "privatekey")
self.shell_execute(sshadd)
sshAgentConnection = "ssh -o StrictHostKeyChecking=no " + self.config.get('ACS', 'username') + '@' + ip
self.log.debug("SSH Connection to agent: " + sshAgentConnection)
self.log.debug("Command to run on agent: " + cmd)
sshCmd = sshAgentConnection + ' \'' + cmd + '\''
self.shell_execute("exit")
result = self.executeOnMaster(sshCmd)
return result
def executeOnMaster(self, cmd):
"""
Execute command on the current master leader
"""
if self._hostnameResolves(self.getManagementEndpoint()):
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
self.getManagementEndpoint(),
username = self.config.get('ACS', "username"),
port = 2200,
key_filename = os.path.expanduser(self.config.get('SSH', "privatekey")))
session = ssh.get_transport().open_session()
self.log.debug("Session opened on master.")
self.log.debug("Executing on master: " + cmd)
AgentRequestHandler(session)
stdin, stdout, stderr = ssh.exec_command(cmd)
stdin.close()
result = ""
for line in stdout.read().splitlines():
self.log.debug(line.decode("utf-8"))
result = result + line.decode("utf-8") + "\n"
for line in stderr.read().splitlines():
self.log.error(line.decode("utf-8"))
else:
self.log.error("Endpoint " + self.getManagementEndpoint() + " does not exist, cannot SSH into it.")
result = "Exception: No cluster is available at " + self.getManagementEndpoint()
ssh.close()
return result
def getClusterSetup(self):
"""
Get all the data about how this cluster is configured.
"""
data = {}
data["parameters"] = self.config.getACSParams()
fqdn = {}
fqdn["master"] = self.getManagementEndpoint()
fqdn["agent"] = self.getAgentEndpoint()
data["domains"] = fqdn
data["sshTunnel"] = "ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N " + self.config.get('ACS', 'username') + "@" + self.getManagementEndpoint() + " -p 2200"
azure = {}
azure['resourceGroup'] = self.config.get('Group', 'name')
data["azure"] = azure
return data
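For reference, the dict returned by getClusterSetup has this shape (values below are illustrative placeholders, assuming dnsPrefix 'mydns', region 'westus' and user 'azureuser'):

example_setup = {
    "parameters": {},   # whatever config.getACSParams() yields
    "domains": {
        "master": "mydnsmgmt.westus.cloudapp.azure.com",
        "agent": "mydnsagents.westus.cloudapp.azure.com",
    },
    "sshTunnel": ("ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N "
                  "azureuser@mydnsmgmt.westus.cloudapp.azure.com -p 2200"),
    "azure": {"resourceGroup": "my-group"},
}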
def
|
(self, cmd):
""" Execute a command on the client in a bash shell. """
self.log.debug("Executing command in shell: " + str(cmd))
dcos_config = os.path.expanduser('~/.dcos/dcos.toml')
os.environ['PATH'] = ':'.join([os.getenv('PATH'), '/src/bin'])
os.environ['DCOS_CONFIG'] = dcos_config
os.makedirs(os.path.dirname(dcos_config), exist_ok=True)
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, errors = p.communicate()
except OSError as e:
self.log.error("Error executing command " + str(cmd) + ". " + e)
raise e
return output.decode("utf-8"), errors.decode("utf-8")
"""The cofiguration for an ACS cluster to work with"""
from acs.ACSLogs import ACSLog
import configparser
import os
class Config(object):
def __init__(self, filename):
self.log = ACSLog("Config")
if not filename:
filename = "~/.acs/default.ini"
self.filename = os.path.expanduser(filename)
self.log.debug("Using config file at " + self.filename)
if not os.path.isfile(self.filename):
self.log.debug("Config file does not exist. Creating a new one.")
dns = input("What is the DNS prefix for this cluster?\n")
group = input("What is the name of the resource group you want to use/create?\n")
region = input("In which region do you want to deploy the resource group (default: westus)?\n") or 'westus'
username = input("What is your username (default: azureuser)?\n") or 'azureuser'
orchestrator = input("Which orchestrator do you want to use (Swarm or DCOS, default: DCOS)?\n") or 'DCOS'
masterCount = input("How many masters do you want in your cluster (1, 3 or 5, default: 3)?\n") or '3'
agentCount = input("How many agents do you want in your cluster (default: 3)?\n") or '3'
agentSize = input("Agent size required (default: Standard_D2_v2)?\n") or 'Standard_D2_v2'
tmpl = open("config/cluster.ini.tmpl")
output = open(self.filename, 'w')
for s in tmpl:
s = s.replace("MY-DNS-PREFIX", dns)
s = s.replace("MY-RESOURCE-REGION", region)
s = s.replace("MY-RESOURCE-GROUP-NAME", group)
s = s.replace("MY-USERNAME", username)
s = s.replace("MY-ORCHESTRATOR", orchestrator)
s = s.replace("MY-MASTER-COUNT", masterCount)
s = s.replace("MY-AGENT-COUNT", agentCount)
s = s.replace("MY-AGENT-SIZE", agentSize)
output.write(s)
self.log.debug("Writing config line: " + s)
tmpl.close()
output.close()
defaults = {"orchestratorType": "DCOS"}
config = configparser.ConfigParser(defaults)
config.read(self.filename)
config.set('Group', 'name', config.get('Group', 'name'))
self.config_parser = config
def get(self, section, name):
value = self.config_parser.get(section, name)
if section == "SSH":
public_filepath = os.path.expanduser(self.config_parser.get('SSH', 'publicKey'))
private_filepath = os.path.expanduser(self.config_parser.get('SSH', 'privatekey'))
if name == "privateKey":
self.log.debug("Checking if private SSH key exists: " + private_filepath)
if not os.path.isfile(private_filepath):
self.log.debug("Key does not exist")
self._generateSSHKey(private_filepath, public_filepath)
with open(private_filepath, 'r') as sshfile:
self.log.debug("Key does not exist")
value = sshfile.read().replace('\n', '')
elif name == "publickey":
self.log.debug("Checking if public SSH key exists: " + public_filepath)
if not os.path.isfile(public_filepath):
self._generateSSHKey(private_filepath, public_filepath)
with open(public_filepath, 'r') as sshfile:
value = sshfile.read().replace('\n', '')
return value
def getint(self, section, name):
return self.config_parser.getint(section, name)
def value(self, set_to):
value
|
shell_execute
|
identifier_name
|
base.py
|
stderr=subprocess.PIPE)
output, errors = p.communicate()
if errors:
# Not currently logged in
p = subprocess.Popen(["azure", "login"], stderr=subprocess.PIPE)
output, errors = p.communicate()
if errors:
return "Failed to login: " + errors.decode("utf-8")
return "Logged in to Azure"
def _hostnameResolves(self, hostname):
try:
socket.gethostbyname(hostname)
return True
except socket.error:
return False
def getManagementEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'mgmt.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def getAgentEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'agents.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def createResourceGroup(self):
self.log.debug("Creating Resource Group")
command = "azure group create " + self.config.get('Group', 'name') + " " + self.config.get('Group', 'region')
os.system(command)
def run(self):
raise NotImplementedError("You must implement the run() method in your commands")
def help(self):
raise NotImplementedError("You must implement the help method. In most cases you will simply do 'print(__doc__)'")
def getAgentIPs(self):
# return a list of Agent IPs in this cluster
agentPool = AgentPool(self.config)
nics = agentPool.getNICs()
ips = []
for nic in nics:
try:
ip = nic["ipConfigurations"][0]["privateIPAddress"]
self.log.debug("IP for " + nic["name"] + " is: " + str(ip))
ips.append(ip)
except KeyError:
self.log.warning("NIC doesn't seem to have the information we need")
self.log.debug("Agent IPs: " + str(ips))
return ips
def executeOnAgent(self, cmd, ip):
"""
Execute command on an agent identified by agent_name
"""
sshadd = "ssh-add " + self.config.get("SSH", "privatekey")
self.shell_execute(sshadd)
sshAgentConnection = "ssh -o StrictHostKeyChecking=no " + self.config.get('ACS', 'username') + '@' + ip
self.log.debug("SSH Connection to agent: " + sshAgentConnection)
self.log.debug("Command to run on agent: " + cmd)
sshCmd = sshAgentConnection + ' \'' + cmd + '\''
self.shell_execute("exit")
result = self.executeOnMaster(sshCmd)
return result
def executeOnMaster(self, cmd):
"""
Execute command on the current master leader
"""
if self._hostnameResolves(self.getManagementEndpoint()):
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
self.getManagementEndpoint(),
username = self.config.get('ACS', "username"),
port = 2200,
key_filename = os.path.expanduser(self.config.get('SSH', "privatekey")))
session = ssh.get_transport().open_session()
self.log.debug("Session opened on master.")
self.log.debug("Executing on master: " + cmd)
AgentRequestHandler(session)
stdin, stdout, stderr = ssh.exec_command(cmd)
stdin.close()
result = ""
for line in stdout.read().splitlines():
self.log.debug(line.decode("utf-8"))
result = result + line.decode("utf-8") + "\n"
for line in stderr.read().splitlines():
self.log.error(line.decode("utf-8"))
else:
self.log.error("Endpoint " + self.getManagementEndpoint() + " does not exist, cannot SSH into it.")
result = "Exception: No cluster is available at " + self.getManagementEndpoint()
ssh.close()
return result
def getClusterSetup(self):
"""
Get all the data about how this cluster is configured.
"""
data = {}
data["parameters"] = self.config.getACSParams()
fqdn = {}
fqdn["master"] = self.getManagementEndpoint()
fqdn["agent"] = self.getAgentEndpoint()
data["domains"] = fqdn
data["sshTunnel"] = "ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N " + self.config.get('ACS', 'username') + "@" + self.getManagementEndpoint() + " -p 2200"
azure = {}
azure['resourceGroup'] = self.config.get('Group', 'name')
data["azure"] = azure
return data
def shell_execute(self, cmd):
""" Execute a command on the client in a bash shell. """
self.log.debug("Executing command in shell: " + str(cmd))
dcos_config = os.path.expanduser('~/.dcos/dcos.toml')
os.environ['PATH'] = ':'.join([os.getenv('PATH'), '/src/bin'])
os.environ['DCOS_CONFIG'] = dcos_config
os.makedirs(os.path.dirname(dcos_config), exist_ok=True)
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, errors = p.communicate()
except OSError as e:
self.log.error("Error executing command " + str(cmd) + ". " + e)
raise e
return output.decode("utf-8"), errors.decode("utf-8")
"""The cofiguration for an ACS cluster to work with"""
from acs.ACSLogs import ACSLog
import configparser
import os
class Config(object):
def __init__(self, filename):
self.log = ACSLog("Config")
if not filename:
filename = "~/.acs/default.ini"
self.filename = os.path.expanduser(filename)
self.log.debug("Using config file at " + self.filename)
if not os.path.isfile(self.filename):
self.log.debug("Config file does not exist. Creating a new one.")
dns = input("What is the DNS prefix for this cluster?\n")
group = input("What is the name of the resource group you want to use/create?\n")
region = input("In which region do you want to deploy the resource group (default: westus)?\n") or 'westus'
username = input("What is your username (default: azureuser)?\n") or 'azureuser'
orchestrator = input("Which orchestrator do you want to use (Swarm or DCOS, default: DCOS)?\n") or 'DCOS'
masterCount = input("How many masters do you want in your cluster (1, 3 or 5, default: 3)?\n") or '3'
agentCount = input("How many agents do you want in your cluster (default: 3)?\n") or '3'
agentSize = input("Agent size required (default: Standard_D2_v2)?\n") or 'Standard_D2_v2'
tmpl = open("config/cluster.ini.tmpl")
output = open(self.filename, 'w')
for s in tmpl:
s = s.replace("MY-DNS-PREFIX", dns)
s = s.replace("MY-RESOURCE-REGION", region)
s = s.replace("MY-RESOURCE-GROUP-NAME", group)
s = s.replace("MY-USERNAME", username)
s = s.replace("MY-ORCHESTRATOR", orchestrator)
s = s.replace("MY-MASTER-COUNT", masterCount)
s = s.replace("MY-AGENT-COUNT", agentCount)
s = s.replace("MY-AGENT-SIZE", agentSize)
output.write(s)
self.log.debug("Writing config line: " + s)
tmpl.close()
output.close()
defaults = {"orchestratorType": "DCOS"}
config = configparser.ConfigParser(defaults)
config.read(self.filename)
config.set('Group', 'name', config.get('Group', 'name'))
self.config_parser = config
def get(self, section, name):
value = self.config_parser.get(section, name)
if section == "SSH":
public_filepath = os.path.expanduser(self.config_parser.get('SSH', 'publicKey'))
private_filepath = os.path.expanduser(self.config_parser.get('SSH', 'privatekey'))
if name == "privateKey":
|
elif name == "publickey":
self.log.debug("Checking if public SSH key exists: " + public_filepath)
if not os.path.isfile(public_filepath):
self._generateSSHKey(private_filepath, public_filepath)
with open(public_filepath, 'r') as sshfile:
value = sshfile.read().replace('\n', '')
return value
def getint(self, section, name):
return self.config_parser.getint(section, name)
def value(self, set_to):
value
|
self.log.debug("Checking if private SSH key exists: " + private_filepath)
if not os.path.isfile(private_filepath):
self.log.debug("Key does not exist")
self._generateSSHKey(private_filepath, public_filepath)
with open(private_filepath, 'r') as sshfile:
self.log.debug("Key does not exist")
value = sshfile.read().replace('\n', '')
|
conditional_block
|
base.py
|
stderr=subprocess.PIPE)
output, errors = p.communicate()
if errors:
# Not currently logged in
p = subprocess.Popen(["azure", "login"], stderr=subprocess.PIPE)
output, errors = p.communicate()
if errors:
return "Failed to login: " + errors.decode("utf-8")
return "Logged in to Azure"
def _hostnameResolves(self, hostname):
try:
socket.gethostbyname(hostname)
return True
except socket.error:
return False
def getManagementEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'mgmt.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def getAgentEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'agents.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def createResourceGroup(self):
self.log.debug("Creating Resource Group")
command = "azure group create " + self.config.get('Group', 'name') + " " + self.config.get('Group', 'region')
os.system(command)
def run(self):
raise NotImplementedError("You must implement the run() method in your commands")
def help(self):
raise NotImplementedError("You must implement the help method. In most cases you will simply do 'print(__doc__)'")
def getAgentIPs(self):
# return a list of Agent IPs in this cluster
agentPool = AgentPool(self.config)
nics = agentPool.getNICs()
ips = []
for nic in nics:
try:
ip = nic["ipConfigurations"][0]["privateIPAddress"]
self.log.debug("IP for " + nic["name"] + " is: " + str(ip))
ips.append(ip)
except KeyError:
self.log.warning("NIC doesn't seem to have the information we need")
self.log.debug("Agent IPs: " + str(ips))
return ips
def executeOnAgent(self, cmd, ip):
"""
Execute command on an agent identified by agent_name
"""
sshadd = "ssh-add " + self.config.get("SSH", "privatekey")
self.shell_execute(sshadd)
sshAgentConnection = "ssh -o StrictHostKeyChecking=no " + self.config.get('ACS', 'username') + '@' + ip
self.log.debug("SSH Connection to agent: " + sshAgentConnection)
self.log.debug("Command to run on agent: " + cmd)
sshCmd = sshAgentConnection + ' \'' + cmd + '\''
self.shell_execute("exit")
result = self.executeOnMaster(sshCmd)
return result
def executeOnMaster(self, cmd):
"""
Execute command on the current master leader
"""
if self._hostnameResolves(self.getManagementEndpoint()):
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
self.getManagementEndpoint(),
username = self.config.get('ACS', "username"),
port = 2200,
key_filename = os.path.expanduser(self.config.get('SSH', "privatekey")))
session = ssh.get_transport().open_session()
self.log.debug("Session opened on master.")
self.log.debug("Executing on master: " + cmd)
AgentRequestHandler(session)
stdin, stdout, stderr = ssh.exec_command(cmd)
stdin.close()
result = ""
for line in stdout.read().splitlines():
self.log.debug(line.decode("utf-8"))
result = result + line.decode("utf-8") + "\n"
for line in stderr.read().splitlines():
self.log.error(line.decode("utf-8"))
else:
self.log.error("Endpoint " + self.getManagementEndpoint() + " does not exist, cannot SSH into it.")
result = "Exception: No cluster is available at " + self.getManagementEndpoint()
ssh.close()
return result
def getClusterSetup(self):
"""
Get all the data about how this cluster is configured.
"""
data = {}
data["parameters"] = self.config.getACSParams()
fqdn = {}
fqdn["master"] = self.getManagementEndpoint()
fqdn["agent"] = self.getAgentEndpoint()
data["domains"] = fqdn
data["sshTunnel"] = "ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N " + self.config.get('ACS', 'username') + "@" + self.getManagementEndpoint() + " -p 2200"
azure = {}
azure['resourceGroup'] = self.config.get('Group', 'name')
data["azure"] = azure
return data
|
def shell_execute(self, cmd):
""" Execute a command on the client in a bash shell. """
self.log.debug("Executing command in shell: " + str(cmd))
dcos_config = os.path.expanduser('~/.dcos/dcos.toml')
os.environ['PATH'] = ':'.join([os.getenv('PATH'), '/src/bin'])
os.environ['DCOS_CONFIG'] = dcos_config
os.makedirs(os.path.dirname(dcos_config), exist_ok=True)
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, errors = p.communicate()
except OSError as e:
self.log.error("Error executing command " + str(cmd) + ". " + e)
raise e
return output.decode("utf-8"), errors.decode("utf-8")
"""The cofiguration for an ACS cluster to work with"""
from acs.ACSLogs import ACSLog
import configparser
import os
class Config(object):
def __init__(self, filename):
self.log = ACSLog("Config")
if not filename:
filename = "~/.acs/default.ini"
self.filename = os.path.expanduser(filename)
self.log.debug("Using config file at " + self.filename)
if not os.path.isfile(self.filename):
self.log.debug("Config file does not exist. Creating a new one.")
dns = input("What is the DNS prefix for this cluster?\n")
group = input("What is the name of the resource group you want to use/create?\n")
region = input("In which region do you want to deploy the resource group (default: westus)?\n") or 'westus'
username = input("What is your username (default: azureuser)?\n") or 'azureuser'
orchestrator = input("Which orchestrator do you want to use (Swarm or DCOS, default: DCOS)?\n") or 'DCOS'
masterCount = input("How many masters do you want in your cluster (1, 3 or 5, default: 3)?\n") or '3'
agentCount = input("How many agents do you want in your cluster (default: 3)?\n") or '3'
agentSize = input("Agent size required (default: Standard_D2_v2)?\n") or 'Standard_D2_v2'
tmpl = open("config/cluster.ini.tmpl")
output = open(self.filename, 'w')
for s in tmpl:
s = s.replace("MY-DNS-PREFIX", dns)
s = s.replace("MY-RESOURCE-REGION", region)
s = s.replace("MY-RESOURCE-GROUP-NAME", group)
s = s.replace("MY-USERNAME", username)
s = s.replace("MY-ORCHESTRATOR", orchestrator)
s = s.replace("MY-MASTER-COUNT", masterCount)
s = s.replace("MY-AGENT-COUNT", agentCount)
s = s.replace("MY-AGENT-SIZE", agentSize)
output.write(s)
self.log.debug("Writing config line: " + s)
tmpl.close()
output.close()
defaults = {"orchestratorType": "DCOS"}
config = configparser.ConfigParser(defaults)
config.read(self.filename)
config.set('Group', 'name', config.get('Group', 'name'))
self.config_parser = config
def get(self, section, name):
value = self.config_parser.get(section, name)
if section == "SSH":
public_filepath = os.path.expanduser(self.config_parser.get('SSH', 'publicKey'))
private_filepath = os.path.expanduser(self.config_parser.get('SSH', 'privatekey'))
if name == "privateKey":
self.log.debug("Checking if private SSH key exists: " + private_filepath)
if not os.path.isfile(private_filepath):
self.log.debug("Key does not exist")
self._generateSSHKey(private_filepath, public_filepath)
with open(private_filepath, 'r') as sshfile:
self.log.debug("Key does not exist")
value = sshfile.read().replace('\n', '')
elif name == "publickey":
self.log.debug("Checking if public SSH key exists: " + public_filepath)
if not os.path.isfile(public_filepath):
self._generateSSHKey(private_filepath, public_filepath)
with open(public_filepath, 'r') as sshfile:
value = sshfile.read().replace('\n', '')
return value
def getint(self, section, name):
return self.config_parser.getint(section, name)
def value(self, set_to):
value
|
random_line_split
|
|
base.py
|
=subprocess.PIPE)
output, errors = p.communicate()
if errors:
# Not currently logged in
p = subprocess.Popen(["azure", "login"], stderr=subprocess.PIPE)
output, errors = p.communicate()
if errors:
return "Failed to login: " + errors.decode("utf-8")
return "Logged in to Azure"
def _hostnameResolves(self, hostname):
try:
socket.gethostbyname(hostname)
return True
except socket.error:
return False
def getManagementEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'mgmt.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def getAgentEndpoint(self):
return self.config.get('ACS', 'dnsPrefix') + 'agents.' + self.config.get('Group', 'region').replace(" ", "").replace('"', '') + '.cloudapp.azure.com'
def createResourceGroup(self):
|
def run(self):
raise NotImplementedError("You must implement the run() method in your commands")
def help(self):
raise NotImplementedError("You must implement the help method. In most cases you will simply do 'print(__doc__)'")
def getAgentIPs(self):
# return a list of Agent IPs in this cluster
agentPool = AgentPool(self.config)
nics = agentPool.getNICs()
ips = []
for nic in nics:
try:
ip = nic["ipConfigurations"][0]["privateIPAddress"]
self.log.debug("IP for " + nic["name"] + " is: " + str(ip))
ips.append(ip)
except KeyError:
self.log.warning("NIC doesn't seem to have the information we need")
self.log.debug("Agent IPs: " + str(ips))
return ips
def executeOnAgent(self, cmd, ip):
"""
Execute command on an agent identified by agent_name
"""
sshadd = "ssh-add " + self.config.get("SSH", "privatekey")
self.shell_execute(sshadd)
sshAgentConnection = "ssh -o StrictHostKeyChecking=no " + self.config.get('ACS', 'username') + '@' + ip
self.log.debug("SSH Connection to agent: " + sshAgentConnection)
self.log.debug("Command to run on agent: " + cmd)
sshCmd = sshAgentConnection + ' \'' + cmd + '\''
self.shell_execute("exit")
result = self.executeOnMaster(sshCmd)
return result
def executeOnMaster(self, cmd):
"""
Execute command on the current master leader
"""
if self._hostnameResolves(self.getManagementEndpoint()):
ssh = SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(
self.getManagementEndpoint(),
username = self.config.get('ACS', "username"),
port = 2200,
key_filename = os.path.expanduser(self.config.get('SSH', "privatekey")))
session = ssh.get_transport().open_session()
self.log.debug("Session opened on master.")
self.log.debug("Executing on master: " + cmd)
AgentRequestHandler(session)
stdin, stdout, stderr = ssh.exec_command(cmd)
stdin.close()
result = ""
for line in stdout.read().splitlines():
self.log.debug(line.decode("utf-8"))
result = result + line.decode("utf-8") + "\n"
for line in stderr.read().splitlines():
self.log.error(line.decode("utf-8"))
else:
self.log.error("Endpoint " + self.getManagementEndpoint() + " does not exist, cannot SSH into it.")
result = "Exception: No cluster is available at " + self.getManagementEndpoint()
ssh.close()
return result
def getClusterSetup(self):
"""
Get all the data about how this cluster is configured.
"""
data = {}
data["parameters"] = self.config.getACSParams()
fqdn = {}
fqdn["master"] = self.getManagementEndpoint()
fqdn["agent"] = self.getAgentEndpoint()
data["domains"] = fqdn
data["sshTunnel"] = "ssh -o StrictHostKeyChecking=no -L 80:localhost:80 -N " + self.config.get('ACS', 'username') + "@" + self.getManagementEndpoint() + " -p 2200"
azure = {}
azure['resourceGroup'] = self.config.get('Group', 'name')
data["azure"] = azure
return data
def shell_execute(self, cmd):
""" Execute a command on the client in a bash shell. """
self.log.debug("Executing command in shell: " + str(cmd))
dcos_config = os.path.expanduser('~/.dcos/dcos.toml')
os.environ['PATH'] = ':'.join([os.getenv('PATH'), '/src/bin'])
os.environ['DCOS_CONFIG'] = dcos_config
os.makedirs(os.path.dirname(dcos_config), exist_ok=True)
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, errors = p.communicate()
except OSError as e:
self.log.error("Error executing command " + str(cmd) + ". " + e)
raise e
return output.decode("utf-8"), errors.decode("utf-8")
"""The cofiguration for an ACS cluster to work with"""
from acs.ACSLogs import ACSLog
import configparser
import os
class Config(object):
def __init__(self, filename):
self.log = ACSLog("Config")
if not filename:
filename = "~/.acs/default.ini"
self.filename = os.path.expanduser(filename)
self.log.debug("Using config file at " + self.filename)
if not os.path.isfile(self.filename):
self.log.debug("Config file does not exist. Creating a new one.")
dns = input("What is the DNS prefix for this cluster?\n")
group = input("What is the name of the resource group you want to use/create?\n")
region = input("In which region do you want to deploy the resource group (default: westus)?\n") or 'westus'
username = input("What is your username (default: azureuser)?\n") or 'azureuser'
orchestrator = input("Which orchestrator do you want to use (Swarm or DCOS, default: DCOS)?\n") or 'DCOS'
masterCount = input("How many masters do you want in your cluster (1, 3 or 5, default: 3)?\n") or '3'
agentCount = input("How many agents do you want in your cluster (default: 3)?\n") or '3'
agentSize = input("Agent size required (default: Standard_D2_v2)?\n") or 'Standard_D2_v2'
tmpl = open("config/cluster.ini.tmpl")
output = open(self.filename, 'w')
for s in tmpl:
s = s.replace("MY-DNS-PREFIX", dns)
s = s.replace("MY-RESOURCE-REGION", region)
s = s.replace("MY-RESOURCE-GROUP-NAME", group)
s = s.replace("MY-USERNAME", username)
s = s.replace("MY-ORCHESTRATOR", orchestrator)
s = s.replace("MY-MASTER-COUNT", masterCount)
s = s.replace("MY-AGENT-COUNT", agentCount)
s = s.replace("MY-AGENT-SIZE", agentSize)
output.write(s)
self.log.debug("Writing config line: " + s)
tmpl.close()
output.close()
defaults = {"orchestratorType": "DCOS"}
config = configparser.ConfigParser(defaults)
config.read(self.filename)
config.set('Group', 'name', config.get('Group', 'name'))
self.config_parser = config
def get(self, section, name):
value = self.config_parser.get(section, name)
if section == "SSH":
public_filepath = os.path.expanduser(self.config_parser.get('SSH', 'publicKey'))
private_filepath = os.path.expanduser(self.config_parser.get('SSH', 'privatekey'))
if name == "privateKey":
self.log.debug("Checking if private SSH key exists: " + private_filepath)
if not os.path.isfile(private_filepath):
self.log.debug("Key does not exist")
self._generateSSHKey(private_filepath, public_filepath)
with open(private_filepath, 'r') as sshfile:
self.log.debug("Key does not exist")
value = sshfile.read().replace('\n', '')
elif name == "publickey":
self.log.debug("Checking if public SSH key exists: " + public_filepath)
if not os.path.isfile(public_filepath):
self._generateSSHKey(private_filepath, public_filepath)
with open(public_filepath, 'r') as sshfile:
value = sshfile.read().replace('\n', '')
return value
def getint(self, section, name):
return self.config_parser.getint(section, name)
def value(self, set_to):
value
|
self.log.debug("Creating Resource Group")
command = "azure group create " + self.config.get('Group', 'name') + " " + self.config.get('Group', 'region')
os.system(command)
|
identifier_body
|
fetch.rs
|
#[derive(Debug)]
struct State {
offset: u64,
eof: u32,
last_line: Vec<u8>,
last_request: Instant,
}
#[derive(Debug)]
struct Cursor {
url: Arc<Url>,
state: Option<State>,
}
struct Requests {
cursors: VecDeque<Arc<Mutex<Cursor>>>,
timeout: Timeout,
}
#[derive(Debug)]
pub struct Request {
cursor: Arc<Mutex<Cursor>>,
range: Option<(u64, u64, u64)>,
}
pub fn group_addrs(vec: Vec<(Address, Vec<Arc<Url>>)>)
-> HashMap<SocketAddr, Vec<Arc<Url>>>
{
let mut urls_by_ip = HashMap::new();
for (addr, urls) in vec {
for sa in addr.addresses_at(0) {
let set = urls_by_ip.entry(sa)
.or_insert_with(HashSet::new);
for url in &urls {
set.insert(url.clone());
}
}
}
let mut ordered = urls_by_ip.iter().collect::<Vec<_>>();
ordered.sort_by_key(|&(_, y)| y.len());
let mut active_ips = HashMap::new();
let mut visited_urls = HashSet::new();
for (ip, urls) in ordered {
let urls = urls.difference(&visited_urls).cloned().collect::<Vec<_>>();
if urls.len() == 0 {
continue;
}
visited_urls.extend(urls.iter().cloned());
active_ips.insert(*ip, urls);
}
return active_ips;
}
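group_addrs deduplicates URLs across addresses: every URL lands on exactly one address, and addresses responsible for the fewest URLs claim theirs first. A Python sketch of the same greedy pass (dict/set stand-ins for the Rust types, illustrative only):

def group_addrs(pairs):
    # pairs: iterable of (socket_addrs, urls) tuples
    urls_by_ip = {}
    for addrs, urls in pairs:
        for sa in addrs:
            urls_by_ip.setdefault(sa, set()).update(urls)
    active_ips, visited = {}, set()
    # smallest URL sets pick first, as in the sort_by_key above
    for ip, urls in sorted(urls_by_ip.items(), key=lambda kv: len(kv[1])):
        fresh = urls - visited
        if fresh:
            visited |= fresh
            active_ips[ip] = fresh
    return active_ips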
#[allow(dead_code)]
pub fn tls_host(host: &str) -> &str {
match host.find(':') {
Some(x) => &host[..x],
None => host,
}
}
pub fn http(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
let resolver = resolver.clone();
let cfg = Config::new()
.keep_alive_timeout(Duration::new(25, 0))
.done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|ips| (ips, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.map(group_addrs)
.and_then(move |map| {
join_all(map.into_iter().map(move |(ip, urls)| {
let cfg = cfg.clone();
TcpStream::connect(&ip, &handle())
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(not(any(feature="tls_native", feature="tls_rustls")))]
pub fn https(_resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use futures::future::err;
if urls_by_host.len() > 0 {
eprintln!("Compiled without TLS support");
return Box::new(err(()));
}
return Box::new(ok(()));
}
#[cfg(feature="tls_native")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use std::io;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let cfg = Config::new().done();
let cx = TlsConnector::builder().expect("tls builder can be created")
.build().expect("tls builder works");
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no IPs");
let cfg = cfg.clone();
let cx = cx.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
cx.connect_async(tls_host(&host), sock).map_err(|e| {
io::Error::new(io::ErrorKind::Other, e)
})
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(feature="tls_rustls")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use std::io::BufReader;
use std::fs::File;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let tls = Arc::new({
let mut cfg = ClientConfig::new();
let read_root = File::open("/etc/ssl/certs/ca-certificates.crt")
.map_err(|e| format!("{}", e))
.and_then(|f|
cfg.root_store.add_pem_file(&mut BufReader::new(f))
.map_err(|()| format!("unrecognized format")));
match read_root {
Ok((_, _)) => {} // TODO(tailhook) log numbers
Err(e) => {
warn!("Can find root certificates at {:?}: {}. \
Using embedded ones.",
"/etc/ssl/certs/ca-certificates.crt", e);
}
}
cfg.root_store.add_server_trust_anchors(
&webpki_roots::TLS_SERVER_ROOTS);
cfg
});
let cfg = Config::new().done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
}))
.map_err(|e| error!("Error resolving: {}", e))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no ips");
let cfg = cfg.clone();
let tls = tls.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
tls.connect_async(tls_host(&host), sock)
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
fn request(cur: &Arc<Mutex<Cursor>>) -> Result<Request, Instant> {
let intr = cur.lock().unwrap();
match intr.state {
None => return Ok(Request {
cursor: cur.clone(),
range: None,
}),
Some(ref state) => {
if state.eof == 0 {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
let next = state.last_request +
Duration::from_millis(100 * min(state.eof as u64, 70));
if next < Instant::now() {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
return Err(next);
}
}
}
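The back-off in request grows linearly with consecutive EOFs and saturates: 100 ms per EOF, capped at 70, so at most 7 s between polls. A quick arithmetic check in Python:

def backoff_ms(eof_count):
    return 100 * min(eof_count, 70)   # from Duration::from_millis above

for n in (1, 5, 70, 500):
    print(n, backoff_ms(n))           # 100, 500, 7000, 7000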
impl Requests {
fn new(urls: Vec<Arc<Url>>) -> Requests {
Requests {
cursors: urls.into_iter().map(|u| Arc::new(Mutex::new(Cursor {
url: u,
state: None,
}))).collect(),
timeout: timeout_at(Instant::now()),
}
}
}
impl Stream for Requests {
type Item = Request;
type Error = Error;
fn poll(&mut self) -> Result<Async<Option<Request>>, Error> {
loop {
match self.timeout.poll().unwrap() {
Async::Ready(()) => {}
Async::NotReady => return Ok(Async::NotReady),
}
let mut min_time = Instant::now() + Duration::new(7, 0);
|
#[cfg(feature="tls_rustls")] use webpki_roots;
|
random_line_split
|
|
fetch.rs
|
Cursor>>,
range: Option<(u64, u64, u64)>,
}
pub fn group_addrs(vec: Vec<(Address, Vec<Arc<Url>>)>)
-> HashMap<SocketAddr, Vec<Arc<Url>>>
{
let mut urls_by_ip = HashMap::new();
for (addr, urls) in vec {
for sa in addr.addresses_at(0) {
let set = urls_by_ip.entry(sa)
.or_insert_with(HashSet::new);
for url in &urls {
set.insert(url.clone());
}
}
}
let mut ordered = urls_by_ip.iter().collect::<Vec<_>>();
ordered.sort_by_key(|&(_, y)| y.len());
let mut active_ips = HashMap::new();
let mut visited_urls = HashSet::new();
for (ip, urls) in ordered {
let urls = urls.difference(&visited_urls).cloned().collect::<Vec<_>>();
if urls.len() == 0 {
continue;
}
visited_urls.extend(urls.iter().cloned());
active_ips.insert(*ip, urls);
}
return active_ips;
}
#[allow(dead_code)]
pub fn tls_host(host: &str) -> &str {
match host.find(':') {
Some(x) => &host[..x],
None => host,
}
}
pub fn http(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
let resolver = resolver.clone();
let cfg = Config::new()
.keep_alive_timeout(Duration::new(25, 0))
.done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|ips| (ips, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.map(group_addrs)
.and_then(move |map| {
join_all(map.into_iter().map(move |(ip, urls)| {
let cfg = cfg.clone();
TcpStream::connect(&ip, &handle())
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(not(any(feature="tls_native", feature="tls_rustls")))]
pub fn https(_resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use futures::future::err;
if urls_by_host.len() > 0 {
eprintln!("Compiled without TLS support");
return Box::new(err(()));
}
return Box::new(ok(()));
}
#[cfg(feature="tls_native")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use std::io;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let cfg = Config::new().done();
let cx = TlsConnector::builder().expect("tls builder can be created")
.build().expect("tls builder works");
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no IPs");
let cfg = cfg.clone();
let cx = cx.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
cx.connect_async(tls_host(&host), sock).map_err(|e| {
io::Error::new(io::ErrorKind::Other, e)
})
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(feature="tls_rustls")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
|
"/etc/ssl/certs/ca-certificates.crt", e);
}
}
cfg.root_store.add_server_trust_anchors(
&webpki_roots::TLS_SERVER_ROOTS);
cfg
});
let cfg = Config::new().done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
}))
.map_err(|e| error!("Error resolving: {}", e))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no ips");
let cfg = cfg.clone();
let tls = tls.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
tls.connect_async(tls_host(&host), sock)
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
fn request(cur: &Arc<Mutex<Cursor>>) -> Result<Request, Instant> {
let intr = cur.lock().unwrap();
match intr.state {
None => return Ok(Request {
cursor: cur.clone(),
range: None,
}),
Some(ref state) => {
if state.eof == 0 {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
let next = state.last_request +
Duration::from_millis(100 * min(state.eof as u64, 70));
if next < Instant::now() {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
return Err(next);
}
}
}
impl Requests {
fn new(urls: Vec<Arc<Url>>) -> Requests {
Requests {
cursors: urls.into_iter().map(|u| Arc::new(Mutex::new(Cursor {
url: u,
state: None,
}))).collect(),
timeout: timeout_at(Instant::now()),
}
}
}
impl Stream for Requests {
type Item = Request;
type Error = Error;
fn poll(&mut self) -> Result<Async<Option<Request>>, Error> {
loop {
match self.timeout.poll().unwrap() {
Async::Ready(()) => {}
Async::NotReady => return Ok(Async::NotReady),
}
let mut min_time = Instant::now() + Duration::new(7, 0);
for _ in 0..self.cursors.len() {
let cur = self.cursors.pop_front().unwrap();
let req = request(&cur);
self.cursors.push_back(cur);
match req {
Ok(req) => return Ok(Async::Ready(Some(req))),
Err(time) if min_time > time => min_time = time,
Err(_) => {}
}
}
self.timeout = timeout_at(min_time);
}
}
}
impl<S> Codec<S> for Request {
type Future = Future
|
{
use std::io::BufReader;
use std::fs::File;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let tls = Arc::new({
let mut cfg = ClientConfig::new();
let read_root = File::open("/etc/ssl/certs/ca-certificates.crt")
.map_err(|e| format!("{}", e))
.and_then(|f|
cfg.root_store.add_pem_file(&mut BufReader::new(f))
.map_err(|()| format!("unrecognized format")));
match read_root {
Ok((_, _)) => {} // TODO(tailhook) log numbers
Err(e) => {
warn!("Can find root certificates at {:?}: {}. \
Using embedded ones.",
|
identifier_body
|
fetch.rs
|
Cursor>>,
range: Option<(u64, u64, u64)>,
}
pub fn group_addrs(vec: Vec<(Address, Vec<Arc<Url>>)>)
-> HashMap<SocketAddr, Vec<Arc<Url>>>
{
let mut urls_by_ip = HashMap::new();
for (addr, urls) in vec {
for sa in addr.addresses_at(0) {
let set = urls_by_ip.entry(sa)
.or_insert_with(HashSet::new);
for url in &urls {
set.insert(url.clone());
}
}
}
let mut ordered = urls_by_ip.iter().collect::<Vec<_>>();
ordered.sort_by_key(|&(_, y)| y.len());
let mut active_ips = HashMap::new();
let mut visited_urls = HashSet::new();
for (ip, urls) in ordered {
let urls = urls.difference(&visited_urls).cloned().collect::<Vec<_>>();
if urls.len() == 0 {
continue;
}
visited_urls.extend(urls.iter().cloned());
active_ips.insert(*ip, urls);
}
return active_ips;
}
#[allow(dead_code)]
pub fn tls_host(host: &str) -> &str {
match host.find(':') {
Some(x) => &host[..x],
None => host,
}
}
pub fn
|
(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
let resolver = resolver.clone();
let cfg = Config::new()
.keep_alive_timeout(Duration::new(25, 0))
.done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|ips| (ips, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.map(group_addrs)
.and_then(move |map| {
join_all(map.into_iter().map(move |(ip, urls)| {
let cfg = cfg.clone();
TcpStream::connect(&ip, &handle())
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(not(any(feature="tls_native", feature="tls_rustls")))]
pub fn https(_resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use futures::future::err;
if urls_by_host.len() > 0 {
eprintln!("Compiled without TLS support");
return Box::new(err(()));
}
return Box::new(ok(()));
}
#[cfg(feature="tls_native")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use std::io;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let cfg = Config::new().done();
let cx = TlsConnector::builder().expect("tls builder can be created")
.build().expect("tls builder works");
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
let h1 = host.clone();
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
.map_err(move |e| error!("Error resolving {:?}: {}", h1, e))
}))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no IPs");
let cfg = cfg.clone();
let cx = cx.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
cx.connect_async(tls_host(&host), sock).map_err(|e| {
io::Error::new(io::ErrorKind::Other, e)
})
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
#[cfg(feature="tls_rustls")]
pub fn https(resolver: &Router, urls_by_host: HashMap<String, Vec<Arc<Url>>>)
-> Box<Future<Item=(), Error=()>>
{
use std::io::BufReader;
use std::fs::File;
if urls_by_host.len() == 0 {
return Box::new(ok(()));
}
let resolver = resolver.clone();
let tls = Arc::new({
let mut cfg = ClientConfig::new();
let read_root = File::open("/etc/ssl/certs/ca-certificates.crt")
.map_err(|e| format!("{}", e))
.and_then(|f|
cfg.root_store.add_pem_file(&mut BufReader::new(f))
.map_err(|()| format!("unrecognized format")));
match read_root {
Ok((_, _)) => {} // TODO(tailhook) log numbers
Err(e) => {
warn!("Can find root certificates at {:?}: {}. \
Using embedded ones.",
"/etc/ssl/certs/ca-certificates.crt", e);
}
}
cfg.root_store.add_server_trust_anchors(
&webpki_roots::TLS_SERVER_ROOTS);
cfg
});
let cfg = Config::new().done();
return Box::new(
join_all(urls_by_host.into_iter().map(move |(host, list)| {
resolver.resolve_auto(&host, 80).map(|addr| (host, addr, list))
}))
.map_err(|e| error!("Error resolving: {}", e))
.and_then(move |map| {
join_all(map.into_iter().map(move |(host, addr, urls)| {
let ip = addr.pick_one().expect("no ips");
let cfg = cfg.clone();
let tls = tls.clone();
TcpStream::connect(&ip, &handle())
.and_then(move |sock| {
tls.connect_async(tls_host(&host), sock)
})
.map_err(move |e| {
error!("Error connecting to {}: {}", ip, e);
})
.and_then(move |sock| {
Proto::new(sock, &handle(), &cfg)
.send_all(Requests::new(urls))
.map(|_| unreachable!())
.map_err(move |e| {
error!("Error (ip: {}): {}", ip, e);
})
})
}))
.map(|_| ())
}));
}
fn request(cur: &Arc<Mutex<Cursor>>) -> Result<Request, Instant> {
let intr = cur.lock().unwrap();
match intr.state {
None => return Ok(Request {
cursor: cur.clone(),
range: None,
}),
Some(ref state) => {
if state.eof == 0 {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
let next = state.last_request +
Duration::from_millis(100 * min(state.eof as u64, 70));
if next < Instant::now() {
return Ok(Request {
cursor: cur.clone(),
range: None,
});
}
return Err(next);
}
}
}
impl Requests {
fn new(urls: Vec<Arc<Url>>) -> Requests {
Requests {
cursors: urls.into_iter().map(|u| Arc::new(Mutex::new(Cursor {
url: u,
state: None,
}))).collect(),
timeout: timeout_at(Instant::now()),
}
}
}
impl Stream for Requests {
type Item = Request;
type Error = Error;
fn poll(&mut self) -> Result<Async<Option<Request>>, Error> {
loop {
match self.timeout.poll().unwrap() {
Async::Ready(()) => {}
Async::NotReady => return Ok(Async::NotReady),
}
let mut min_time = Instant::now() + Duration::new(7, 0);
for _ in 0..self.cursors.len() {
let cur = self.cursors.pop_front().unwrap();
let req = request(&cur);
self.cursors.push_back(cur);
match req {
Ok(req) => return Ok(Async::Ready(Some(req))),
Err(time) if min_time > time => min_time = time,
Err(_) => {}
}
}
self.timeout = timeout_at(min_time);
}
}
}
impl<S> Codec<S> for Request {
type Future =
|
http
|
identifier_name
|
dyn_cor_rel.py
|
, by, rg):
s_c = s[..., np.newaxis]
l_by_rg = np.array([[1, 1, 1],
[1/3, 1/3, -2/3],
[1, -1, 0]]
)
l_by_rg = (l_by_rg/((l_by_rg**2).sum(1, keepdims=True)**0.5))
rgb = l_by_rg[0]*lum + l_by_rg[1]*by + l_by_rg[2]*rg
s_c = s_c*rgb[np.newaxis,np.newaxis]
return s_c
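The three rows of l_by_rg are the luminance, blue-yellow and red-green color directions; after the per-row normalization they form an orthonormal basis, which a quick check confirms:

import numpy as np

axes = np.array([[1, 1, 1],
                 [1/3, 1/3, -2/3],
                 [1, -1, 0]], dtype=float)
axes /= np.linalg.norm(axes, axis=1, keepdims=True)
print(np.round(axes @ axes.T, 8))   # identity matrix: orthonormal axes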
def sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0):
s = sinusoid_2d(nx, ny, x_0, y_0, sf, ori, phase, bg=0)
s_c = colorize(s, lum, by, rg)
return scale_im(s_c)
def sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lum, by, rg, bg=0,
sf2=None, rel_ori=None, phase2=None,
lum2=None, by2=None, rg2=None, make_window=False, radius=None):
if sf2 is None:
sf2 = sf
if rel_ori is None:
rel_ori = 0
if phase2 is None:
phase2 = phase
if lum2 is None:
lum2 = lum
if by2 is None:
by2 = by
if rg2 is None:
rg2 = rg
s1 = sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0)
s2 = sine_chrom(nx, ny, x_0, y_0, sf2, ori+rel_ori, phase2,
lum2, by2, rg2)
s = s1+s2
if make_window:
w = window(radius, x_0, y_0, nx, ny)
s = w[..., np.newaxis]*s
return s
def norm_im(im):
im = im - im.min()
im = im/im.max()
return im
def scale_im(im):
im = im - im.min()
im = 2*im/im.max()
im = im-1
return im
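norm_im and scale_im differ only in output range, [0, 1] versus [-1, 1]. A quick sanity check:

im_check = np.random.randn(8, 8)
print(norm_im(im_check).min(), norm_im(im_check).max())    # 0.0 1.0
print(scale_im(im_check).min(), scale_im(im_check).max())  # -1.0 1.0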
mod = models.alexnet(pretrained=True).features[:1]
w = list(mod.parameters())[0].detach().numpy()
w_da = xr.DataArray(w, dims=('unit', 'channel', 'row', 'col'))
n_units = w.shape[0]
w_da_noise = w_da.copy(deep=True)
w_da_noise[...] = np.random.normal(size=w_da.shape, scale=0.1)
#%%
nx = ny = 11
stims = []
ori = list(np.linspace(0, 180-180/64, 64))
phase = list(np.linspace(0, 360-360/8, 8))
sf = list(np.logspace(np.log10(0.1), np.log10(.25), 8))
contrast = [1,]
lbr = [1,0,0]
make_window = False
param_nms = ['ori', 'sf', 'phase']
params = [ori, sf, phase]
for i, p in enumerate(params):
if not isinstance(p, list):
params[i] = [p,]
cart_prod_params = np.array(list(product(*params)))
da = xr.DataArray(np.zeros(tuple(len(p) for p in params )),
dims=param_nms,
coords=params )
da_stims = da.squeeze(drop=True).expand_dims({'row':range(11),
'col':range(11),
'channel':range(3)})
da_stims = da_stims.transpose('ori', 'sf', 'phase', 'row', 'col', 'channel').copy()
x_0 = y_0 = 5
stim =[]
for p in (cart_prod_params):
#plt.figure()
ori, sf, phase = p
im = sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lbr[0], lbr[1], lbr[2], bg=0,
sf2=sf, rel_ori=0, phase2=phase,
lum2=lbr[0], by2=lbr[1], rg2=lbr[2],
make_window=make_window)
#plt.imshow(norm_im(w[...,np.newaxis]*im))
stim.append(im)
stims.append(stim)
stims = np.array(stims).squeeze()
rs = []
for stim, param in zip(stims, cart_prod_params):
ori, sf, phase = param
da_stims.loc[ori, sf, phase] = stim.copy()
#%%
w_da = w_da/(w_da**2).sum(('channel', 'row', 'col'))**0.5
#da_stims = da_stims/(da_stims**2).sum(('channel', 'row', 'col'))**0.5
da_sig = da_stims.dot(w_da)
da_noise = da_stims.dot(w_da_noise)
#%%
n_units = len(da_sig.coords['unit'].values)
unit_coords = list(product(range(n_units),range(n_units),))
mod_cor = xr.DataArray(np.zeros((n_units, n_units, 4, 2)),
dims=['unit_r', 'unit_c', 'vars', 'sn'],
coords=[range(n_units), range(n_units),
['cor', 'dyn', 'ori_ind', 'phase_ind'],
['s','n']])
sf_ind = 0
dim=('phase', 'ori')
for ind1, ind2 in tqdm((unit_coords)):
for i, da in enumerate([da_sig, da_noise]):
u1 = (da.isel(unit=ind1, sf=sf_ind).squeeze()**2).sum('phase')**0.5
u2 = (da.isel(unit=ind2, sf=sf_ind).squeeze()**2).sum('phase')**0.5
corr = auto_corr(u1, u2, dim=('ori'), pad=None)
r = np.max(np.real(corr))
mod_cor[ind1, ind2, 0, i] = r
mod_cor[ind1, ind2, 1, i] = u1.std()*u2.std()
mod_cor[ind1, ind2, 2:, i] = np.array(np.unravel_index(np.argmax(corr), corr.shape))
#%%
dfs = [mod_cor[...,i,:].to_dataframe(name=str(mod_cor.coords['vars'][i].values)).drop('vars', axis=1)
for i in range(len(mod_cor.coords['vars']))]
df = pd.concat(dfs, axis=1)
m_inds = np.array([np.array(a) for a in df.index.values])
drop_inds = m_inds[:,0]<m_inds[:,1]
df_d = df[drop_inds]
df_d = df_d.reorder_levels([2,0,1])
#%%
def fz(r):
return 0.5*(np.log((1+r)/(1-r)))
from scipy import stats
plt.figure(figsize=(4,3))
df = df_d.loc['s']
rs = []
for df in [df_d.loc['s'], df_d.loc['n']]:
r,p = (stats.spearmanr(df['dyn'], df['cor']**2))
rs.append(r)
plt.scatter(df['dyn'], df['cor']**2, s=1);plt.semilogx();
print(p)
plt.xlim(0.01, 1000)
plt.ylim(0,1.1)
plt.xlabel('Dynamic range')
plt.ylabel('$r^2_{ER}$')
plt.title('Trained $r=$' + str(np.round(rs[0],2)) +
', untrained $r=$' + str(np.round(rs[1],2)))
inds = []
plt.legend(['Trained', 'Untrained'])
df = df_d.loc['s']
for i, ind in enumerate([0,-12,-100, 0,-12, -100]):
if i<=2:
ranks = (df['cor'].rank() + df['dyn'].rank()).sort_values()[::-1]
else:
ranks = (df['cor'].rank() - df['dyn'].rank()).sort_values()[::-1]
|
u1, u2 = ranks.index.values[ind]
inds.append([u1,u2])
plt.scatter(df['dyn'][u1,u2], df['cor'][u1,u2]**2, s=10, c='r');plt.semilogx();
#%%
j=0
plt.figure(figsize=(3,8))
for ind in inds:
u1, u2 = ind
u1r = (da_sig.isel(unit=u1, sf=sf_ind).squeeze()**2).sum('phase')**0.5
u2r = (da_sig.isel(unit=u2, sf=sf_ind).squeeze()**2).sum('phase')**0.5
j+=1
plt.subplot
|
random_line_split
|
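The colorize function above projects a grayscale grating into RGB through a row-normalised luminance / blue-yellow / red-green basis. A small self-contained check that the normalised basis is orthonormal, so the three colour axes do not leak into one another:

# Verify the colour basis used by colorize: after row-normalisation the
# luminance, blue-yellow and red-green axes are unit length and mutually
# orthogonal, and a pure-luminance stimulus maps to equal R=G=B.
import numpy as np

l_by_rg = np.array([[1, 1, 1],
                    [1/3, 1/3, -2/3],
                    [1, -1, 0]], dtype=float)
l_by_rg /= (l_by_rg**2).sum(1, keepdims=True)**0.5

print(np.round(l_by_rg @ l_by_rg.T, 6))          # ~ identity matrix
print(l_by_rg[0]*1 + l_by_rg[1]*0 + l_by_rg[2]*0)  # lum=1, by=rg=0 -> equal RGB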
|
dyn_cor_rel.py
|
, by, rg):
s_c = s[..., np.newaxis]
l_by_rg = np.array([[1, 1, 1],
[1/3, 1/3, -2/3],
[1, -1, 0]]
)
l_by_rg = (l_by_rg/((l_by_rg**2).sum(1, keepdims=True)**0.5))
rgb = l_by_rg[0]*lum + l_by_rg[1]*by + l_by_rg[2]*rg
s_c = s_c*rgb[np.newaxis,np.newaxis]
return s_c
def sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0):
s = sinusoid_2d(nx, ny, x_0, y_0, sf, ori, phase, bg=0)
s_c = colorize(s, lum, by, rg)
return scale_im(s_c)
def sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lum, by, rg, bg=0,
sf2=None, rel_ori=None, phase2=None,
lum2=None, by2=None, rg2=None, make_window=False, radius=None):
if sf2 is None:
sf2 = sf
if rel_ori is None:
rel_ori = 0
if phase2 is None:
phase2 = phase
if lum2 is None:
|
if by2 is None:
by2 = by
if rg2 is None:
rg2 = rg
s1 = sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0)
s2 = sine_chrom(nx, ny, x_0, y_0, sf2, ori+rel_ori, phase2,
lum2, by2, rg2)
s = s1+s2
if make_window:
w = window(radius, x_0, y_0, nx, ny)
s = w[..., np.newaxis]*s
return s
def norm_im(im):
im = im - im.min()
im = im/im.max()
return im
def scale_im(im):
im = im - im.min()
im = 2*im/im.max()
im = im-1
return im
mod = models.alexnet(pretrained=True).features[:1]
w = list(mod.parameters())[0].detach().numpy()
w_da = xr.DataArray(w, dims=('unit', 'channel', 'row', 'col'))
n_units = w.shape[0]
w_da_noise = w_da.copy(deep=True)
w_da_noise[...] = np.random.normal(size=w_da.shape, scale=0.1)
#%%
nx = ny = 11
stims = []
ori = list(np.linspace(0, 180-180/64, 64))
phase = list(np.linspace(0, 360-360/8, 8))
sf = list(np.logspace(np.log10(0.1), np.log10(.25), 8))
contrast = [1,]
lbr = [1,0,0]
make_window = False
param_nms = ['ori', 'sf', 'phase']
params = [ori, sf, phase]
for i, p in enumerate(params):
    if not isinstance(p, list):
params[i] = [p,]
cart_prod_params = np.array(list(product(*params)))
da = xr.DataArray(np.zeros(tuple(len(p) for p in params )),
dims=param_nms,
coords=params )
da_stims = da.squeeze(drop=True).expand_dims({'row':range(11),
'col':range(11),
'channel':range(3)})
da_stims = da_stims.transpose('ori', 'sf', 'phase', 'row', 'col', 'channel').copy()
x_0 = y_0 = 5
stim =[]
for p in (cart_prod_params):
#plt.figure()
ori, sf, phase = p
im = sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lbr[0], lbr[1], lbr[2], bg=0,
sf2=sf, rel_ori=0, phase2=phase,
lum2=lbr[0], by2=lbr[1], rg2=lbr[2],
make_window=make_window)
#plt.imshow(norm_im(w[...,np.newaxis]*im))
stim.append(im)
stims.append(stim)
stims = np.array(stims).squeeze()
rs = []
for stim, param in zip(stims, cart_prod_params):
ori, sf, phase = param
da_stims.loc[ori, sf, phase] = stim.copy()
#%%
w_da = w_da/(w_da**2).sum(('channel', 'row', 'col'))**0.5
#da_stims = da_stims/(da_stims**2).sum(('channel', 'row', 'col'))**0.5
da_sig = da_stims.dot(w_da)
da_noise = da_stims.dot(w_da_noise)
#%%
n_units = len(da_sig.coords['unit'].values)
unit_coords = list(product(range(n_units),range(n_units),))
mod_cor = xr.DataArray(np.zeros((n_units, n_units, 4, 2)),
dims=['unit_r', 'unit_c', 'vars', 'sn'],
coords=[range(n_units), range(n_units),
['cor', 'dyn', 'ori_ind', 'phase_ind'],
['s','n']])
sf_ind = 0
dim=('phase', 'ori')
for ind1, ind2 in tqdm((unit_coords)):
for i, da in enumerate([da_sig, da_noise]):
u1 = (da.isel(unit=ind1, sf=sf_ind).squeeze()**2).sum('phase')**0.5
u2 = (da.isel(unit=ind2, sf=sf_ind).squeeze()**2).sum('phase')**0.5
corr = auto_corr(u1, u2, dim=('ori'), pad=None)
r = np.max(np.real(corr))
mod_cor[ind1, ind2, 0, i] = r
mod_cor[ind1, ind2, 1, i] = u1.std()*u2.std()
mod_cor[ind1, ind2, 2:, i] = np.array(np.unravel_index(np.argmax(corr), corr.shape))
#%%
dfs = [mod_cor[...,i,:].to_dataframe(name=str(mod_cor.coords['vars'][i].values)).drop('vars', axis=1)
for i in range(len(mod_cor.coords['vars']))]
df = pd.concat(dfs, axis=1)
m_inds = np.array([np.array(a) for a in df.index.values])
drop_inds = m_inds[:,0]<m_inds[:,1]
df_d = df[drop_inds]
df_d = df_d.reorder_levels([2,0,1])
#%%
def fz(r):
return 0.5*(np.log((1+r)/(1-r)))
from scipy import stats
plt.figure(figsize=(4,3))
df = df_d.loc['s']
rs = []
for df in [df_d.loc['s'], df_d.loc['n']]:
r,p = (stats.spearmanr(df['dyn'], df['cor']**2))
rs.append(r)
plt.scatter(df['dyn'], df['cor']**2, s=1);plt.semilogx();
print(p)
plt.xlim(0.01, 1000)
plt.ylim(0,1.1)
plt.xlabel('Dynamic range')
plt.ylabel('$r^2_{ER}$')
plt.title('Trained $r=$' + str(np.round(rs[0],2)) +
', untrained $r=$' + str(np.round(rs[1],2)))
inds = []
plt.legend(['Trained', 'Untrained'])
df = df_d.loc['s']
for i, ind in enumerate([0,-12,-100, 0,-12, -100]):
if i<=2:
ranks = (df['cor'].rank() + df['dyn'].rank()).sort_values()[::-1]
else:
ranks = (df['cor'].rank() - df['dyn'].rank()).sort_values()[::-1]
u1, u2 = ranks.index.values[ind]
inds.append([u1,u2])
plt.scatter(df['dyn'][u1,u2], df['cor'][u1,u2]**2, s=10, c='r');plt.semilogx();
#%%
j=0
plt.figure(figsize=(3,8))
for ind in inds:
u1, u2 = ind
u1r = (da_sig.isel(unit=u1, sf=sf_ind).squeeze()**2).sum('phase')**0.5
u2r = (da_sig.isel(unit=u2, sf=sf_ind).squeeze()**2).sum('phase')**0.5
j+=1
plt.subplot
|
lum2 = lum
|
conditional_block
|
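The stimulus-construction block above builds the Cartesian product of parameters with itertools.product and writes each stimulus into an xarray DataArray by coordinate with .loc. A toy-sized sketch of the same pattern (dimensions and values are illustrative, not the experiment's):

# Parameter-grid pattern: Cartesian product in, label-indexed assignment out.
import numpy as np
import xarray as xr
from itertools import product

oris = [0.0, 45.0]
sfs = [0.1, 0.2]
da = xr.DataArray(np.zeros((len(oris), len(sfs), 4, 4)),
                  dims=('ori', 'sf', 'row', 'col'),
                  coords={'ori': oris, 'sf': sfs})
for ori, sf in product(oris, sfs):
    da.loc[ori, sf] = np.random.rand(4, 4)   # stand-in for a grating
print(da.sel(ori=45.0, sf=0.2).shape)        # (4, 4)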
dyn_cor_rel.py
|
(radius, x_0, y_0, nx, ny):
x_coords = np.arange(0, nx, dtype=np.float128) - x_0
y_coords = np.arange(0, ny, dtype=np.float128) - y_0
xx, yy = np.meshgrid(x_coords, y_coords)
d = (xx**2 + yy**2)**0.5
w = np.zeros((int(nx), int(ny)))
w[d<=radius] = 1
return w
def cos_window(radius, x_0, y_0, nx, ny):
x_coords = np.arange(0, nx, dtype=np.float128) - x_0
y_coords = np.arange(0, ny, dtype=np.float128) - y_0
xx, yy = np.meshgrid(x_coords, y_coords)
d = (xx**2 + yy**2)**0.5
w = np.cos(d*np.pi*(1/radius)) + 1
w[d>radius] = 0
return w
def colorize(s, lum, by, rg):
s_c = s[..., np.newaxis]
l_by_rg = np.array([[1, 1, 1],
[1/3, 1/3, -2/3],
[1, -1, 0]]
)
l_by_rg = (l_by_rg/((l_by_rg**2).sum(1, keepdims=True)**0.5))
rgb = l_by_rg[0]*lum + l_by_rg[1]*by + l_by_rg[2]*rg
s_c = s_c*rgb[np.newaxis,np.newaxis]
return s_c
def sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0):
s = sinusoid_2d(nx, ny, x_0, y_0, sf, ori, phase, bg=0)
s_c = colorize(s, lum, by, rg)
return scale_im(s_c)
def sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lum, by, rg, bg=0,
sf2=None, rel_ori=None, phase2=None,
lum2=None, by2=None, rg2=None, make_window=False, radius=None):
if sf2 is None:
sf2 = sf
if rel_ori is None:
rel_ori = 0
if phase2 is None:
phase2 = phase
if lum2 is None:
lum2 = lum
if by2 is None:
by2 = by
if rg2 is None:
rg2 = rg
s1 = sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0)
s2 = sine_chrom(nx, ny, x_0, y_0, sf2, ori+rel_ori, phase2,
lum2, by2, rg2)
s = s1+s2
if make_window:
w = window(radius, x_0, y_0, nx, ny)
s = w[..., np.newaxis]*s
return s
def norm_im(im):
im = im - im.min()
im = im/im.max()
return im
def scale_im(im):
im = im - im.min()
im = 2*im/im.max()
im = im-1
return im
mod = models.alexnet(pretrained=True).features[:1]
w = list(mod.parameters())[0].detach().numpy()
w_da = xr.DataArray(w, dims=('unit', 'channel', 'row', 'col'))
n_units = w.shape[0]
w_da_noise = w_da.copy(deep=True)
w_da_noise[...] = np.random.normal(size=w_da.shape, scale=0.1)
#%%
nx = ny = 11
stims = []
ori = list(np.linspace(0, 180-180/64, 64))
phase = list(np.linspace(0, 360-360/8, 8))
sf = list(np.logspace(np.log10(0.1), np.log10(.25), 8))
contrast = [1,]
lbr = [1,0,0]
make_window = False
param_nms = ['ori', 'sf', 'phase']
params = [ori, sf, phase]
for i, p in enumerate(params):
    if not isinstance(p, list):
params[i] = [p,]
cart_prod_params = np.array(list(product(*params)))
da = xr.DataArray(np.zeros(tuple(len(p) for p in params )),
dims=param_nms,
coords=params )
da_stims = da.squeeze(drop=True).expand_dims({'row':range(11),
'col':range(11),
'channel':range(3)})
da_stims = da_stims.transpose('ori', 'sf', 'phase', 'row', 'col', 'channel').copy()
x_0 = y_0 = 5
stim =[]
for p in (cart_prod_params):
#plt.figure()
ori, sf, phase = p
im = sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lbr[0], lbr[1], lbr[2], bg=0,
sf2=sf, rel_ori=0, phase2=phase,
lum2=lbr[0], by2=lbr[1], rg2=lbr[2],
make_window=make_window)
#plt.imshow(norm_im(w[...,np.newaxis]*im))
stim.append(im)
stims.append(stim)
stims = np.array(stims).squeeze()
rs = []
for stim, param in zip(stims, cart_prod_params):
ori, sf, phase = param
da_stims.loc[ori, sf, phase] = stim.copy()
#%%
w_da = w_da/(w_da**2).sum(('channel', 'row', 'col'))**0.5
#da_stims = da_stims/(da_stims**2).sum(('channel', 'row', 'col'))**0.5
da_sig = da_stims.dot(w_da)
da_noise = da_stims.dot(w_da_noise)
#%%
n_units = len(da_sig.coords['unit'].values)
unit_coords = list(product(range(n_units),range(n_units),))
mod_cor = xr.DataArray(np.zeros((n_units, n_units, 4, 2)),
dims=['unit_r', 'unit_c', 'vars', 'sn'],
coords=[range(n_units), range(n_units),
['cor', 'dyn', 'ori_ind', 'phase_ind'],
['s','n']])
sf_ind = 0
dim=('phase', 'ori')
for ind1, ind2 in tqdm((unit_coords)):
for i, da in enumerate([da_sig, da_noise]):
u1 = (da.isel(unit=ind1, sf=sf_ind).squeeze()**2).sum('phase')**0.5
u2 = (da.isel(unit=ind2, sf=sf_ind).squeeze()**2).sum('phase')**0.5
corr = auto_corr(u1, u2, dim=('ori'), pad=None)
r = np.max(np.real(corr))
mod_cor[ind1, ind2, 0, i] = r
mod_cor[ind1, ind2, 1, i] = u1.std()*u2.std()
mod_cor[ind1, ind2, 2:, i] = np.array(np.unravel_index(np.argmax(corr), corr.shape))
#%%
dfs = [mod_cor[...,i,:].to_dataframe(name=str(mod_cor.coords['vars'][i].values)).drop('vars', axis=1)
for i in range(len(mod_cor.coords['vars']))]
df = pd.concat(dfs, axis=1)
m_inds = np.array([np.array(a) for a in df.index.values])
drop_inds = m_inds[:,0]<m_inds[:,1]
df_d = df[drop_inds]
df_d = df_d.reorder_levels([2,0,1])
#%%
def fz(r):
return 0.5*(np.log((1+r)/(1-r)))
from scipy import stats
plt.figure(figsize=(4,3))
df = df_d.loc['s']
rs = []
for df in [df_d.loc['s'], df_d.loc['n']]:
r,p = (stats.spearmanr(df['dyn'], df['cor']**2))
rs.append(r)
plt.scatter(df['dyn'], df['cor']**2, s=1);plt.semilogx();
print(p)
plt.xlim(0.01, 1000)
plt.ylim(0,1.1)
plt.xlabel('Dynamic range')
plt.ylabel('$r^2_{ER}$')
plt.title('Trained $r=$' + str(np.round(rs[0],2)) +
', untrained $r=$' + str(np.round(rs[1],2)))
inds = []
plt.legend(['Trained', 'Untrained'])
df = df
|
window
|
identifier_name
|
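The rank arithmetic in this script picks out example unit pairs: summing the ranks of 'cor' and 'dyn' surfaces pairs that are high on both measures, while the rank difference surfaces pairs where the two measures disagree. A toy illustration:

# Rank-sum vs rank-difference selection on a tiny DataFrame.
import pandas as pd

df = pd.DataFrame({'dyn': [0.1, 5.0, 2.0, 9.0],
                   'cor': [0.9, 0.2, 0.8, 0.95]},
                  index=['a', 'b', 'c', 'd'])
both_high = (df['cor'].rank() + df['dyn'].rank()).sort_values()[::-1]
disagree  = (df['cor'].rank() - df['dyn'].rank()).sort_values()[::-1]
print(both_high.index[0])   # 'd': large on both measures
print(disagree.index[0])    # 'a': high cor, low dyn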
|
dyn_cor_rel.py
|
def window(radius, x_0, y_0, nx, ny):
x_coords = np.arange(0, nx, dtype=np.float128) - x_0
y_coords = np.arange(0, ny, dtype=np.float128) - y_0
xx, yy = np.meshgrid(x_coords, y_coords)
d = (xx**2 + yy**2)**0.5
w = np.zeros((int(nx), int(ny)))
w[d<=radius] = 1
return w
def cos_window(radius, x_0, y_0, nx, ny):
x_coords = np.arange(0, nx, dtype=np.float128) - x_0
y_coords = np.arange(0, ny, dtype=np.float128) - y_0
xx, yy = np.meshgrid(x_coords, y_coords)
d = (xx**2 + yy**2)**0.5
w = np.cos(d*np.pi*(1/radius)) + 1
w[d>radius] = 0
return w
def colorize(s, lum, by, rg):
s_c = s[..., np.newaxis]
l_by_rg = np.array([[1, 1, 1],
[1/3, 1/3, -2/3],
[1, -1, 0]]
)
l_by_rg = (l_by_rg/((l_by_rg**2).sum(1, keepdims=True)**0.5))
rgb = l_by_rg[0]*lum + l_by_rg[1]*by + l_by_rg[2]*rg
s_c = s_c*rgb[np.newaxis,np.newaxis]
return s_c
def sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0):
s = sinusoid_2d(nx, ny, x_0, y_0, sf, ori, phase, bg=0)
s_c = colorize(s, lum, by, rg)
return scale_im(s_c)
def sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lum, by, rg, bg=0,
sf2=None, rel_ori=None, phase2=None,
lum2=None, by2=None, rg2=None, make_window=False, radius=None):
if sf2 is None:
sf2 = sf
if rel_ori is None:
rel_ori = 0
if phase2 is None:
phase2 = phase
if lum2 is None:
lum2 = lum
if by2 is None:
by2 = by
if rg2 is None:
rg2 = rg
s1 = sine_chrom(nx, ny, x_0, y_0, sf, ori, phase, lum, by, rg, bg=0)
s2 = sine_chrom(nx, ny, x_0, y_0, sf2, ori+rel_ori, phase2,
lum2, by2, rg2)
s = s1+s2
if make_window:
w = window(radius, x_0, y_0, nx, ny)
s = w[..., np.newaxis]*s
return s
def norm_im(im):
im = im - im.min()
im = im/im.max()
return im
def scale_im(im):
im = im - im.min()
im = 2*im/im.max()
im = im-1
return im
mod = models.alexnet(pretrained=True).features[:1]
w = list(mod.parameters())[0].detach().numpy()
w_da = xr.DataArray(w, dims=('unit', 'channel', 'row', 'col'))
n_units = w.shape[0]
w_da_noise = w_da.copy(deep=True)
w_da_noise[...] = np.random.normal(size=w_da.shape, scale=0.1)
#%%
nx = ny = 11
stims = []
ori = list(np.linspace(0, 180-180/64, 64))
phase = list(np.linspace(0, 360-360/8, 8))
sf = list(np.logspace(np.log10(0.1), np.log10(.25), 8))
contrast = [1,]
lbr = [1,0,0]
make_window = False
param_nms = ['ori', 'sf', 'phase']
params = [ori, sf, phase]
for i, p in enumerate(params):
    if not isinstance(p, list):
params[i] = [p,]
cart_prod_params = np.array(list(product(*params)))
da = xr.DataArray(np.zeros(tuple(len(p) for p in params )),
dims=param_nms,
coords=params )
da_stims = da.squeeze(drop=True).expand_dims({'row':range(11),
'col':range(11),
'channel':range(3)})
da_stims = da_stims.transpose('ori', 'sf', 'phase', 'row', 'col', 'channel').copy()
x_0 = y_0 = 5
stim =[]
for p in (cart_prod_params):
#plt.figure()
ori, sf, phase = p
im = sine_chrom_dual(nx, ny, x_0, y_0, sf, ori, phase,
lbr[0], lbr[1], lbr[2], bg=0,
sf2=sf, rel_ori=0, phase2=phase,
lum2=lbr[0], by2=lbr[1], rg2=lbr[2],
make_window=make_window)
#plt.imshow(norm_im(w[...,np.newaxis]*im))
stim.append(im)
stims.append(stim)
stims = np.array(stims).squeeze()
rs = []
for stim, param in zip(stims, cart_prod_params):
ori, sf, phase = param
da_stims.loc[ori, sf, phase] = stim.copy()
#%%
w_da = w_da/(w_da**2).sum(('channel', 'row', 'col'))**0.5
#da_stims = da_stims/(da_stims**2).sum(('channel', 'row', 'col'))**0.5
da_sig = da_stims.dot(w_da)
da_noise = da_stims.dot(w_da_noise)
#%%
n_units = len(da_sig.coords['unit'].values)
unit_coords = list(product(range(n_units),range(n_units),))
mod_cor = xr.DataArray(np.zeros((n_units, n_units, 4, 2)),
dims=['unit_r', 'unit_c', 'vars', 'sn'],
coords=[range(n_units), range(n_units),
['cor', 'dyn', 'ori_ind', 'phase_ind'],
['s','n']])
sf_ind = 0
dim=('phase', 'ori')
for ind1, ind2 in tqdm((unit_coords)):
for i, da in enumerate([da_sig, da_noise]):
u1 = (da.isel(unit=ind1, sf=sf_ind).squeeze()**2).sum('phase')**0.5
u2 = (da.isel(unit=ind2, sf=sf_ind).squeeze()**2).sum('phase')**0.5
corr = auto_corr(u1, u2, dim=('ori'), pad=None)
r = np.max(np.real(corr))
mod_cor[ind1, ind2, 0, i] = r
mod_cor[ind1, ind2, 1, i] = u1.std()*u2.std()
mod_cor[ind1, ind2, 2:, i] = np.array(np.unravel_index(np.argmax(corr), corr.shape))
#%%
dfs = [mod_cor[...,i,:].to_dataframe(name=str(mod_cor.coords['vars'][i].values)).drop('vars', axis=1)
for i in range(len(mod_cor.coords['vars']))]
df = pd.concat(dfs, axis=1)
m_inds = np.array([np.array(a) for a in df.index.values])
drop_inds = m_inds[:,0]<m_inds[:,1]
df_d = df[drop_inds]
df_d = df_d.reorder_levels([2,0,1])
#%%
def fz(r):
return 0.5*(np.log((1+r)/(1-r)))
from scipy import stats
plt.figure(figsize=(4,3))
df = df_d.loc['s']
rs = []
for df in [df_d.loc['s'], df_d.loc['n']]:
r,p = (stats.spearmanr(df['dyn'], df['cor']**
|
x_coords = np.arange(0, nx, dtype=np.float64) - x_0
y_coords = np.arange(0, ny, dtype=np.float64) - y_0
xx, yy = np.meshgrid(x_coords, y_coords)
mu_0, nu_0 = pol2cart(sf, np.deg2rad(ori + 90))
s = np.sin(2*np.pi*(mu_0*xx + nu_0*yy) + np.deg2rad(phase + 90))
s = s + bg
return s
|
identifier_body
|
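The identifier_body above is the core grating generator, which assumes a pol2cart helper. A self-contained version under the standard polar-to-Cartesian convention (sf in cycles/pixel, ori and phase in degrees):

# Self-contained 2-D sinusoid generator matching the body shown above.
import numpy as np

def pol2cart(rho, phi):
    # standard polar -> Cartesian (assumed convention for the helper)
    return rho * np.cos(phi), rho * np.sin(phi)

def sinusoid_2d(nx, ny, x_0, y_0, sf, ori, phase, bg=0):
    x = np.arange(nx, dtype=np.float64) - x_0
    y = np.arange(ny, dtype=np.float64) - y_0
    xx, yy = np.meshgrid(x, y)
    mu_0, nu_0 = pol2cart(sf, np.deg2rad(ori + 90))
    return np.sin(2*np.pi*(mu_0*xx + nu_0*yy) + np.deg2rad(phase + 90)) + bg

print(sinusoid_2d(11, 11, 5, 5, 0.1, 0, 0).shape)  # (11, 11)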
|
Run_all_models_modified.py
|
.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
class Bunch(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# output logs to stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# command-line options: choose the hashing vectorizer and its feature count
op = OptionParser()
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
#============================================================================
# Read custom data
#============================================================================
polarity = []
user_tweets = []
balance_classes = 1 # if 0, it only trims class 4, else trims all classes to size cls_num_ext
cls_0_num = 0
cls_1_num = 0
cls_2_num = 0
cls_3_num = 0
cls_4_num = 0
cls_num_ext = 500
num_ext = 0
with open("Categorized_User_Polarity.txt") as inp:
for line in inp:
values = line.split("\t") # id, polarity
user_id = values[0]
user_file = "tokens_lines_test/" + user_id
if os.path.isfile(user_file): # not all user IDs had tweets in the master tweet file
            if not balance_classes:
                if int(values[1]) == 4:
                    if num_ext < 2000:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets (avoid shadowing the outer handle)
                            user_tweets.append(tweets_file.read())
                        num_ext += 1
                else:
                    polarity.append(int(values[1]))  # save the polarity
                    with open(user_file, 'r') as tweets_file:  # save the tweets
                        user_tweets.append(tweets_file.read())
            else:
                # elif chain so each user is counted exactly once; the original
                # compared every class against 0, which miscounted classes 1-3.
                if int(values[1]) == 0:
                    if cls_0_num < cls_num_ext:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets
                            user_tweets.append(tweets_file.read())
                        cls_0_num += 1
                elif int(values[1]) == 1:
                    if cls_1_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_1_num += 1
                elif int(values[1]) == 2:
                    if cls_2_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_2_num += 1
                elif int(values[1]) == 3:
                    if cls_3_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_3_num += 1
                else:  # class 4
                    if cls_4_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_4_num += 1
#============================================================================
# Split into training and testing sets
#============================================================================
# X y % data used for testing
raw_X_train, raw_X_test, raw_y_train, raw_y_test = train_test_split(user_tweets, polarity, test_size=0.2)
categories = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train = Bunch()
data_train.data = raw_X_train
data_train.target_names = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train.target = raw_y_train
data_test = Bunch()
data_test.data = raw_X_test
data_test.target = raw_y_test
target_names = data_train.target_names # Note: order of labels in `target_names` can be different from `categories`
print('data loaded')
#============================================================================
# Create target vectors
#============================================================================
y_train, y_test = data_train.target, data_test.target
init_time = time()
if opts.use_hashing:
print("Using hashing vectorizer")
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False, n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
print("Using tfidf vectorizer")
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test set")
init_time = time()
X_test = vectorizer.transform(data_test.data)
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# feature names: a mapping from column index to token (not available with the hashing vectorizer)
if opts.use_hashing:
names_of_features = None
else:
names_of_features = vectorizer.get_feature_names()
if names_of_features:
names_of_features = np.asarray(names_of_features)
def trim(s):
|
#============================================================================
# Benchmark classifiers
#============================================================================
def benchmark(clf):
print('_' * 80)
print("Starting training: ")
print(clf)
init_time = time()
clf.fit(X_train, y_train)
time_to_train = time() - init_time
print("train time: %0.3fs" % time_to_train)
print("Starting testing: ")
init_time = time()
prediction = clf.predict(X_test)
time_to_test = time() - init_time
print("test time: %0.3fs" % time_to_test)
print("Scoring the model: ")
accuracy = metrics.accuracy_score(y_test, prediction)
print("accuracy: %0.3f" % accuracy)
clf_descr = str(clf).split('(')[0]
return clf_descr, accuracy, time_to_train, time_to_test
all_model_results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="sag"), "Ridge Classifier"),
(Perceptron(max_iter=50, tol=1e-3), "Perceptron"),
(PassiveAggressiveClassifier(max_iter=50, tol=1e-3), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
all_model_results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
all_model_results.append(benchmark(LinearSVC(penalty=penalty, dual=False, tol=1e-3))) # Create and train lib-linear models
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty=penalty))) # Create and train stochastic gradient models
print('=' * 80)
print("SGD with Elastic-Net")
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty="elasticnet"))) # Create and train SGD w/ elastic penalty
print('=' * 80)
print("Nearest Centroid")
all_model_results.append(benchmark(NearestCentroid())) # Train NearestCentroid without threshold
print('=' * 80)
print("Naive Bayes (multinomial, bernoulli, and complement)")
all_model_results.append(benchmark(MultinomialNB(alpha=.01))) # Train sparse Naive Bayes classifiers
all_model_results.append(benchmark(BernoulliNB(alpha=.01)))
all_model_results.append(benchmark(ComplementNB(alpha=.1)))
print('=' * 80)
print("Linear SVC with l1")
all_model_results.append(benchmark(Pipeline([('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False, tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
|
return s if len(s) <= 80 else s[:77] + "..."
|
identifier_body
|
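The script above follows the usual vectorize-train-score loop. A minimal self-contained version of that loop on a made-up toy corpus (texts and labels are illustrative only):

# Vectorize, train, score: the core of the benchmark harness above.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics

texts  = ["tax cuts now", "healthcare for all", "lower taxes", "public healthcare"]
labels = [1, 0, 1, 0]

vec = TfidfVectorizer()
X = vec.fit_transform(texts)                 # fit on training text only
clf = MultinomialNB(alpha=.01).fit(X, labels)
pred = clf.predict(vec.transform(["cut taxes", "universal healthcare"]))
print(metrics.accuracy_score([1, 0], pred))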
Run_all_models_modified.py
|
.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
class Bunch(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# output logs to stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# command-line options: choose the hashing vectorizer and its feature count
op = OptionParser()
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
#============================================================================
# Read custom data
#============================================================================
polarity = []
user_tweets = []
balance_classes = 1 # if 0, it only trims class 4, else trims all classes to size cls_num_ext
cls_0_num = 0
cls_1_num = 0
cls_2_num = 0
cls_3_num = 0
cls_4_num = 0
cls_num_ext = 500
num_ext = 0
with open("Categorized_User_Polarity.txt") as inp:
for line in inp:
values = line.split("\t") # id, polarity
user_id = values[0]
user_file = "tokens_lines_test/" + user_id
if os.path.isfile(user_file): # not all user IDs had tweets in the master tweet file
            if not balance_classes:
                if int(values[1]) == 4:
                    if num_ext < 2000:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets (avoid shadowing the outer handle)
                            user_tweets.append(tweets_file.read())
                        num_ext += 1
                else:
                    polarity.append(int(values[1]))  # save the polarity
                    with open(user_file, 'r') as tweets_file:  # save the tweets
                        user_tweets.append(tweets_file.read())
            else:
                # elif chain so each user is counted exactly once; the original
                # compared every class against 0, which miscounted classes 1-3.
                if int(values[1]) == 0:
                    if cls_0_num < cls_num_ext:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets
                            user_tweets.append(tweets_file.read())
                        cls_0_num += 1
                elif int(values[1]) == 1:
                    if cls_1_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_1_num += 1
                elif int(values[1]) == 2:
                    if cls_2_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_2_num += 1
                elif int(values[1]) == 3:
                    if cls_3_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_3_num += 1
                else:  # class 4
                    if cls_4_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_4_num += 1
#============================================================================
# Split into training and testing sets
#============================================================================
# X y % data used for testing
raw_X_train, raw_X_test, raw_y_train, raw_y_test = train_test_split(user_tweets, polarity, test_size=0.2)
categories = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train = Bunch()
data_train.data = raw_X_train
data_train.target_names = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train.target = raw_y_train
data_test = Bunch()
data_test.data = raw_X_test
data_test.target = raw_y_test
target_names = data_train.target_names # Note: order of labels in `target_names` can be different from `categories`
print('data loaded')
#============================================================================
# Create target vectors
#============================================================================
y_train, y_test = data_train.target, data_test.target
init_time = time()
if opts.use_hashing:
print("Using hashing vectorizer")
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False, n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
print("Using tfidf vectorizer")
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test set")
init_time = time()
X_test = vectorizer.transform(data_test.data)
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# feature names: a mapping from column index to token (not available with the hashing vectorizer)
if opts.use_hashing:
names_of_features = None
else:
|
if names_of_features:
names_of_features = np.asarray(names_of_features)
def trim(s):
return s if len(s) <= 80 else s[:77] + "..."
#============================================================================
# Benchmark classifiers
#============================================================================
def benchmark(clf):
print('_' * 80)
print("Starting training: ")
print(clf)
init_time = time()
clf.fit(X_train, y_train)
time_to_train = time() - init_time
print("train time: %0.3fs" % time_to_train)
print("Starting testing: ")
init_time = time()
prediction = clf.predict(X_test)
time_to_test = time() - init_time
print("test time: %0.3fs" % time_to_test)
print("Scoring the model: ")
accuracy = metrics.accuracy_score(y_test, prediction)
print("accuracy: %0.3f" % accuracy)
clf_descr = str(clf).split('(')[0]
return clf_descr, accuracy, time_to_train, time_to_test
all_model_results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="sag"), "Ridge Classifier"),
(Perceptron(max_iter=50, tol=1e-3), "Perceptron"),
(PassiveAggressiveClassifier(max_iter=50, tol=1e-3), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
all_model_results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
all_model_results.append(benchmark(LinearSVC(penalty=penalty, dual=False, tol=1e-3))) # Create and train lib-linear models
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty=penalty))) # Create and train stochastic gradient models
print('=' * 80)
print("SGD with Elastic-Net")
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty="elasticnet"))) # Create and train SGD w/ elastic penalty
print('=' * 80)
print("Nearest Centroid")
all_model_results.append(benchmark(NearestCentroid())) # Train NearestCentroid without threshold
print('=' * 80)
print("Naive Bayes (multinomial, bernoulli, and complement)")
all_model_results.append(benchmark(MultinomialNB(alpha=.01))) # Train sparse Naive Bayes classifiers
all_model_results.append(benchmark(BernoulliNB(alpha=.01)))
all_model_results.append(benchmark(ComplementNB(alpha=.1)))
print('=' * 80)
print("Linear SVC with l1")
all_model_results.append(benchmark(Pipeline([('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False, tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
|
names_of_features = vectorizer.get_feature_names()
|
conditional_block
|
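The --use_hashing branch above trades exact feature names for a fixed, stateless feature space. A short sketch of the difference; get_feature_names_out is the newer scikit-learn spelling of the get_feature_names call used in the script:

# Hashing vectorizer: no fit, no vocabulary. Tfidf: fit learns the vocabulary.
from sklearn.feature_extraction.text import HashingVectorizer, TfidfVectorizer

docs = ["red team", "blue team", "red blue"]

hv = HashingVectorizer(alternate_sign=False, n_features=2**8)
X_hash = hv.transform(docs)            # no fit step, no feature names
tv = TfidfVectorizer()
X_tfidf = tv.fit_transform(docs)       # fit learns the vocabulary
print(X_hash.shape, X_tfidf.shape, tv.get_feature_names_out())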
Run_all_models_modified.py
|
.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
class
|
(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# output logs to stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# command-line options: choose the hashing vectorizer and its feature count
op = OptionParser()
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
#============================================================================
# Read custom data
#============================================================================
polarity = []
user_tweets = []
balance_classes = 1 # if 0, it only trims class 4, else trims all classes to size cls_num_ext
cls_0_num = 0
cls_1_num = 0
cls_2_num = 0
cls_3_num = 0
cls_4_num = 0
cls_num_ext = 500
num_ext = 0
with open("Categorized_User_Polarity.txt") as inp:
for line in inp:
values = line.split("\t") # id, polarity
user_id = values[0]
user_file = "tokens_lines_test/" + user_id
if os.path.isfile(user_file): # not all user IDs had tweets in the master tweet file
            if not balance_classes:
                if int(values[1]) == 4:
                    if num_ext < 2000:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets (avoid shadowing the outer handle)
                            user_tweets.append(tweets_file.read())
                        num_ext += 1
                else:
                    polarity.append(int(values[1]))  # save the polarity
                    with open(user_file, 'r') as tweets_file:  # save the tweets
                        user_tweets.append(tweets_file.read())
            else:
                # elif chain so each user is counted exactly once; the original
                # compared every class against 0, which miscounted classes 1-3.
                if int(values[1]) == 0:
                    if cls_0_num < cls_num_ext:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets
                            user_tweets.append(tweets_file.read())
                        cls_0_num += 1
                elif int(values[1]) == 1:
                    if cls_1_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_1_num += 1
                elif int(values[1]) == 2:
                    if cls_2_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_2_num += 1
                elif int(values[1]) == 3:
                    if cls_3_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_3_num += 1
                else:  # class 4
                    if cls_4_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_4_num += 1
#============================================================================
# Split into training and testing sets
#============================================================================
# X y % data used for testing
raw_X_train, raw_X_test, raw_y_train, raw_y_test = train_test_split(user_tweets, polarity, test_size=0.2)
categories = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train = Bunch()
data_train.data = raw_X_train
data_train.target_names = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train.target = raw_y_train
data_test = Bunch()
data_test.data = raw_X_test
data_test.target = raw_y_test
target_names = data_train.target_names # Note: order of labels in `target_names` can be different from `categories`
print('data loaded')
#============================================================================
# Create target vectors
#============================================================================
y_train, y_test = data_train.target, data_test.target
init_time = time()
if opts.use_hashing:
print("Using hashing vectorizer")
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False, n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
print("Using tfidf vectorizer")
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test set")
init_time = time()
X_test = vectorizer.transform(data_test.data)
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# feature names: a mapping from column index to token (not available with the hashing vectorizer)
if opts.use_hashing:
names_of_features = None
else:
names_of_features = vectorizer.get_feature_names()
if names_of_features:
names_of_features = np.asarray(names_of_features)
def trim(s):
return s if len(s) <= 80 else s[:77] + "..."
#============================================================================
# Benchmark classifiers
#============================================================================
def benchmark(clf):
print('_' * 80)
print("Starting training: ")
print(clf)
init_time = time()
clf.fit(X_train, y_train)
time_to_train = time() - init_time
print("train time: %0.3fs" % time_to_train)
print("Starting testing: ")
init_time = time()
prediction = clf.predict(X_test)
time_to_test = time() - init_time
print("test time: %0.3fs" % time_to_test)
print("Scoring the model: ")
accuracy = metrics.accuracy_score(y_test, prediction)
print("accuracy: %0.3f" % accuracy)
clf_descr = str(clf).split('(')[0]
return clf_descr, accuracy, time_to_train, time_to_test
all_model_results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="sag"), "Ridge Classifier"),
(Perceptron(max_iter=50, tol=1e-3), "Perceptron"),
(PassiveAggressiveClassifier(max_iter=50, tol=1e-3), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
all_model_results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
all_model_results.append(benchmark(LinearSVC(penalty=penalty, dual=False, tol=1e-3))) # Create and train lib-linear models
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty=penalty))) # Create and train stochastic gradient models
print('=' * 80)
print("SGD with Elastic-Net")
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty="elasticnet"))) # Create and train SGD w/ elastic penalty
print('=' * 80)
print("Nearest Centroid")
all_model_results.append(benchmark(NearestCentroid())) # Train NearestCentroid without threshold
print('=' * 80)
print("Naive Bayes (multinomial, bernoulli, and complement)")
all_model_results.append(benchmark(MultinomialNB(alpha=.01))) # Train sparse Naive Bayes classifiers
all_model_results.append(benchmark(BernoulliNB(alpha=.01)))
all_model_results.append(benchmark(ComplementNB(alpha=.1)))
print('=' * 80)
print("Linear SVC with l1")
all_model_results.append(benchmark(Pipeline([('feature_selection', SelectFromModel(LinearSVC(penalty="l1", dual=False, tol=1e-3))),
('classification', LinearSVC(penalty="l2"))])))
|
Bunch
|
identifier_name
|
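The Bunch class above is the classic attribute-dict container. The standard library's types.SimpleNamespace gives the same behaviour, shown side by side:

# Attribute-dict container: hand-rolled Bunch vs types.SimpleNamespace.
from types import SimpleNamespace

class Bunch(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

b = Bunch(data=[1, 2], target_names=['a', 'b'])
s = SimpleNamespace(data=[1, 2], target_names=['a', 'b'])
print(b.data == s.data)  # True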
Run_all_models_modified.py
|
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import SelectKBest
from sklearn.linear_model import RidgeClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import ComplementNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn import metrics
class Bunch(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# output logs to stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# command-line options: choose the hashing vectorizer and its feature count
op = OptionParser()
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
def is_interactive():
return not hasattr(sys.modules['__main__'], '__file__')
argv = [] if is_interactive() else sys.argv[1:]
(opts, args) = op.parse_args(argv)
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
#============================================================================
# Read custom data
#============================================================================
polarity = []
user_tweets = []
balance_classes = 1 # if 0, it only trims class 4, else trims all classes to size cls_num_ext
cls_0_num = 0
cls_1_num = 0
cls_2_num = 0
cls_3_num = 0
cls_4_num = 0
cls_num_ext = 500
num_ext = 0
with open("Categorized_User_Polarity.txt") as inp:
for line in inp:
values = line.split("\t") # id, polarity
user_id = values[0]
user_file = "tokens_lines_test/" + user_id
if os.path.isfile(user_file): # not all user IDs had tweets in the master tweet file
            if not balance_classes:
                if int(values[1]) == 4:
                    if num_ext < 2000:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets (avoid shadowing the outer handle)
                            user_tweets.append(tweets_file.read())
                        num_ext += 1
                else:
                    polarity.append(int(values[1]))  # save the polarity
                    with open(user_file, 'r') as tweets_file:  # save the tweets
                        user_tweets.append(tweets_file.read())
            else:
                # elif chain so each user is counted exactly once; the original
                # compared every class against 0, which miscounted classes 1-3.
                if int(values[1]) == 0:
                    if cls_0_num < cls_num_ext:
                        polarity.append(int(values[1]))  # save the polarity
                        with open(user_file, 'r') as tweets_file:  # save the tweets
                            user_tweets.append(tweets_file.read())
                        cls_0_num += 1
                elif int(values[1]) == 1:
                    if cls_1_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_1_num += 1
                elif int(values[1]) == 2:
                    if cls_2_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_2_num += 1
                elif int(values[1]) == 3:
                    if cls_3_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_3_num += 1
                else:  # class 4
                    if cls_4_num < cls_num_ext:
                        polarity.append(int(values[1]))
                        with open(user_file, 'r') as tweets_file:
                            user_tweets.append(tweets_file.read())
                        cls_4_num += 1
#============================================================================
# Split into training and testing sets
#============================================================================
# X y % data used for testing
raw_X_train, raw_X_test, raw_y_train, raw_y_test = train_test_split(user_tweets, polarity, test_size=0.2)
categories = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train = Bunch()
data_train.data = raw_X_train
data_train.target_names = ['far_left','mid_left','neutral','mid_right','far_right'] # for 5 classes
data_train.target = raw_y_train
data_test = Bunch()
data_test.data = raw_X_test
data_test.target = raw_y_test
target_names = data_train.target_names # Note: order of labels in `target_names` can be different from `categories`
print('data loaded')
#============================================================================
# Create target vectors
#============================================================================
y_train, y_test = data_train.target, data_test.target
init_time = time()
if opts.use_hashing:
print("Using hashing vectorizer")
vectorizer = HashingVectorizer(stop_words='english', alternate_sign=False, n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
print("Using tfidf vectorizer")
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test set")
init_time = time()
X_test = vectorizer.transform(data_test.data)
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# feature names: a mapping from column index to token (not available with the hashing vectorizer)
if opts.use_hashing:
names_of_features = None
else:
names_of_features = vectorizer.get_feature_names()
if names_of_features:
names_of_features = np.asarray(names_of_features)
def trim(s):
return s if len(s) <= 80 else s[:77] + "..."
#============================================================================
# Benchmark classifiers
#============================================================================
def benchmark(clf):
print('_' * 80)
print("Starting training: ")
print(clf)
init_time = time()
clf.fit(X_train, y_train)
time_to_train = time() - init_time
print("train time: %0.3fs" % time_to_train)
print("Starting testing: ")
init_time = time()
prediction = clf.predict(X_test)
time_to_test = time() - init_time
print("test time: %0.3fs" % time_to_test)
print("Scoring the model: ")
accuracy = metrics.accuracy_score(y_test, prediction)
print("accuracy: %0.3f" % accuracy)
clf_descr = str(clf).split('(')[0]
return clf_descr, accuracy, time_to_train, time_to_test
all_model_results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="sag"), "Ridge Classifier"),
(Perceptron(max_iter=50, tol=1e-3), "Perceptron"),
(PassiveAggressiveClassifier(max_iter=50, tol=1e-3), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
all_model_results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
all_model_results.append(benchmark(LinearSVC(penalty=penalty, dual=False, tol=1e-3))) # Create and train lib-linear models
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty=penalty))) # Create and train stochastic gradient models
print('=' * 80)
print("SGD with Elastic-Net")
all_model_results.append(benchmark(SGDClassifier(alpha=.0001, max_iter=50, penalty="elasticnet"))) # Create and train SGD w/ elastic penalty
print('=' * 80)
print("Nearest Centroid")
all_model_results.append(benchmark(NearestCentroid())) # Train NearestCentroid without threshold
print('=' * 80)
print("Naive Bayes (multinomial, bernoulli, and complement)")
all_model_results.append(benchmark(MultinomialNB(alpha=.01))) # Train sparse Naive Bayes classifiers
all_model_results.append(benchmark(BernoulliNB(alpha=.01)))
all_model_results.append(benchmark(ComplementNB(alpha=.1)))
print('=' * 80)
print("Linear SVC with l1")
|
from sklearn.feature_extraction.text import HashingVectorizer
|
random_line_split
|
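The five cls_*_num counters in the script above can be collapsed into a single dictionary keyed by label, which avoids the copy-paste comparison mistakes that per-class if-chains invite. A sketch with an illustrative cap and labels:

# Cap per-class counts with one dict instead of five named counters.
from collections import defaultdict

cap = 500
counts = defaultdict(int)
balanced = []
for label, item in [(0, 'a'), (4, 'b'), (0, 'c'), (4, 'd')]:
    if counts[label] < cap:
        balanced.append((label, item))
        counts[label] += 1
print(dict(counts))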
|
pod_helper.go
|
}
}
// Important considerations.
// A Pending status on a Pod can have various causes and sometimes signals a problem.
// Case I: Pending because the image pull is failing and backing off.
//    This could be transient, so we can rely on the failure reason.
//    The failure transitions from ErrImagePull -> ImagePullBackOff.
// Case II: Not enough resources are available. This is tricky. It could be that the total amount of
//    resources requested is beyond the capability of the system. For this we rely on configuration
//    and hence input gates. We should not let bad requests that ask for a large amount of resources through.
//    In case one makes it through, we will fail after a timeout.
func DemystifyPending(status v1.PodStatus) (pluginsCore.PhaseInfo, error) {
	// Search over the different conditions in the status object. Note that the 'Pending' this function is
	// demystifying is the 'phase' of the pod status. This is different from the PodReady condition type also used below.
for _, c := range status.Conditions {
switch c.Type {
case v1.PodScheduled:
if c.Status == v1.ConditionFalse {
// Waiting to be scheduled. This usually refers to inability to acquire resources.
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
}
case v1.PodReasonUnschedulable:
		// We ignore the case in which we are unable to find resources on the cluster, because:
		// - The resources may not be available at the moment, but may become available eventually.
		//   The pod scheduler will keep looking at this pod and trying to satisfy it.
//
// Pod status looks like this:
// message: '0/1 nodes are available: 1 Insufficient memory.'
// reason: Unschedulable
// status: "False"
// type: PodScheduled
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
case v1.PodReady:
if c.Status == v1.ConditionFalse {
			// This happens when the image is having some problems. In the following example, K8s is having
			// problems downloading an image. To confirm that, we have to iterate over all the container
			// statuses and check whether some container has an image pull failure.
// e.g.
// - lastProbeTime: null
// lastTransitionTime: 2018-12-18T00:57:30Z
// message: 'containers with unready status: [myapp-container]'
// reason: ContainersNotReady
// status: "False"
// type: Ready
//
// e.g. Container status
// - image: blah
// imageID: ""
// lastState: {}
// name: myapp-container
// ready: false
// restartCount: 0
// state:
// waiting:
// message: Back-off pulling image "blah"
// reason: ImagePullBackOff
for _, containerStatus := range status.ContainerStatuses {
if !containerStatus.Ready {
if containerStatus.State.Waiting != nil {
// There are a variety of reasons that can cause a pod to be in this waiting state.
					// The waiting state may be legitimate while the image is being downloaded, the container is starting, or init containers are running.
reason := containerStatus.State.Waiting.Reason
finalReason := fmt.Sprintf("%s|%s", c.Reason, reason)
finalMessage := fmt.Sprintf("%s|%s", c.Message, containerStatus.State.Waiting.Message)
switch reason {
case "ErrImagePull", "ContainerCreating", "PodInitializing":
						// There are only two "reasons" under which a pod that is being created
						// successfully sits in the waiting state
// Refer to https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet_pods.go
// and look for the default waiting states
// We also want to allow Image pulls to be retried, so ErrImagePull will be ignored
// as it eventually enters into ImagePullBackOff
// ErrImagePull -> Transitionary phase to ImagePullBackOff
// ContainerCreating -> Image is being downloaded
// PodInitializing -> Init containers are running
return pluginsCore.PhaseInfoInitializing(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("[%s]: %s", finalReason, finalMessage), &pluginsCore.TaskInfo{OccurredAt: &c.LastTransitionTime.Time}), nil
case "CreateContainerConfigError", "CreateContainerError":
						// This happens if, for instance, the command given to the container is incorrect, i.e. it doesn't run
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
case "ImagePullBackOff":
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
default:
						// Since we are not checking for all error states, we may end up perpetually
						// in the queued state returned at the bottom of this function, until the Pod is reaped
						// by K8s and we get elusive 'pod not found' errors.
						// So by default, if the container is not waiting with the PodInitializing/ContainerCreating
						// reasons, we will assume a failure reason and fail instantly.
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoSystemRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
}
}
}
}
}
}
}
return pluginsCore.PhaseInfoQueued(time.Now(), pluginsCore.DefaultPhaseVersion, "Scheduling"), nil
}
func DemystifySuccess(status v1.PodStatus, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) {
for _, status := range append(
append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
if status.State.Terminated != nil && strings.Contains(status.State.Terminated.Reason, OOMKilled) {
return pluginsCore.PhaseInfoRetryableFailure("OOMKilled",
"Pod reported success despite being OOMKilled", &info), nil
}
}
return pluginsCore.PhaseInfoSuccess(&info), nil
}
func DeterminePrimaryContainerPhase(primaryContainerName string, statuses []v1.ContainerStatus, info *pluginsCore.TaskInfo) pluginsCore.PhaseInfo {
for _, s := range statuses {
if s.Name == primaryContainerName {
if s.State.Waiting != nil || s.State.Running != nil {
return pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, info)
}
if s.State.Terminated != nil {
if s.State.Terminated.ExitCode != 0 {
return pluginsCore.PhaseInfoRetryableFailure(
s.State.Terminated.Reason, s.State.Terminated.Message, info)
}
return pluginsCore.PhaseInfoSuccess(info)
}
}
}
// If for some reason we can't find the primary container, always just return a permanent failure
return pluginsCore.PhaseInfoFailure("PrimaryContainerMissing",
fmt.Sprintf("Primary container [%s] not found in pod's container statuses", primaryContainerName), info)
}
func ConvertPodFailureToError(status v1.PodStatus) (code, message string) {
code = "UnknownError"
message = "Pod failed. No message received from kubernetes."
if len(status.Reason) > 0 {
code = status.Reason
}
if len(status.Message) > 0 {
message = status.Message
}
for _, c := range append(
append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
var containerState v1.ContainerState
if c.LastTerminationState.Terminated != nil {
containerState = c.LastTerminationState
} else if c.State.Terminated != nil {
containerState = c.State
}
if containerState.Terminated != nil {
		if strings.Contains(containerState.Terminated.Reason, OOMKilled) {
code = OOMKilled
} else if containerState.Terminated.ExitCode == SIGKILL {
// in some setups, node termination sends SIGKILL to all the containers running on that node. Capturing and
// tagging that correctly.
code = Interrupted
}
if containerState.Terminated.ExitCode == 0 {
message += fmt.Sprintf("\r\n[%v] terminated with ExitCode 0.", c.Name)
|
} else {
|
random_line_split
|
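DemystifyPending above is essentially a decision table over container waiting reasons. A language-agnostic sketch of that table in Python; the phase names here are illustrative, not the plugin's actual constants:

# Waiting-reason decision table, mirroring the switch in DemystifyPending.
TRANSIENT = {"ErrImagePull", "ContainerCreating", "PodInitializing"}
PERMANENT = {"CreateContainerConfigError", "CreateContainerError"}
RETRYABLE = {"ImagePullBackOff"}

def classify_waiting_reason(reason: str) -> str:
    if reason in TRANSIENT:
        return "initializing"          # keep waiting, the pod is coming up
    if reason in PERMANENT:
        return "permanent-failure"     # e.g. a bad container command
    if reason in RETRYABLE:
        return "retryable-failure"     # back off and retry the task
    return "system-retryable-failure"  # unknown reason: fail fast by default

print(classify_waiting_reason("ImagePullBackOff"))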
|
pod_helper.go
|
().DefaultNodeSelector)
if taskExecutionMetadata.IsInterruptible() {
podSpec.NodeSelector = utils.UnionMaps(podSpec.NodeSelector, config.GetK8sPluginConfig().InterruptibleNodeSelector)
}
if podSpec.Affinity == nil {
podSpec.Affinity = config.GetK8sPluginConfig().DefaultAffinity
}
}
func ToK8sPodSpec(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v1.PodSpec, error) {
task, err := tCtx.TaskReader().Read(ctx)
if err != nil {
logger.Warnf(ctx, "failed to read task information when trying to construct Pod, err: %s", err.Error())
return nil, err
}
if task.GetContainer() == nil {
logger.Errorf(ctx, "Default Pod creation logic works for default container in the task template only.")
return nil, fmt.Errorf("container not specified in task template")
}
c, err := ToK8sContainer(ctx, task.GetContainer(), task.Interface, template.Parameters{
Task: tCtx.TaskReader(),
Inputs: tCtx.InputReader(),
OutputPath: tCtx.OutputWriter(),
TaskExecMetadata: tCtx.TaskExecutionMetadata(),
})
if err != nil {
return nil, err
}
containers := []v1.Container{
*c,
}
pod := &v1.PodSpec{
Containers: containers,
}
UpdatePod(tCtx.TaskExecutionMetadata(), []v1.ResourceRequirements{c.Resources}, pod)
if err := AddCoPilotToPod(ctx, config.GetK8sPluginConfig().CoPilot, pod, task.GetInterface(), tCtx.TaskExecutionMetadata(), tCtx.InputReader(), tCtx.OutputWriter(), task.GetContainer().GetDataConfig()); err != nil {
return nil, err
}
return pod, nil
}
func BuildPodWithSpec(podSpec *v1.PodSpec) *v1.Pod {
pod := v1.Pod{
TypeMeta: v12.TypeMeta{
Kind: PodKind,
APIVersion: v1.SchemeGroupVersion.String(),
},
Spec: *podSpec,
}
return &pod
}
func BuildIdentityPod() *v1.Pod {
return &v1.Pod{
TypeMeta: v12.TypeMeta{
Kind: PodKind,
APIVersion: v1.SchemeGroupVersion.String(),
},
}
}
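// Sketch (hypothetical helper, not in the original file): how the two builders
// above are typically combined. BuildPodWithSpec wraps an existing spec, while
// BuildIdentityPod yields an empty, self-describing shell to patch fields into.
func examplePodBuilders() {
	spec := &v1.PodSpec{
		Containers: []v1.Container{{Name: "primary", Image: "busybox"}},
	}
	pod := BuildPodWithSpec(spec)
	// TypeMeta is pre-populated, so serialized objects carry Kind/APIVersion.
	fmt.Println(pod.TypeMeta.Kind, pod.TypeMeta.APIVersion)
	template := BuildIdentityPod()
	fmt.Println(template.TypeMeta.Kind)
}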
// Important considerations.
// A Pending status on a Pod can have various causes and can sometimes signal a problem.
// Case I: Pending because the image pull is failing and it is backing off.
// This could be transient, so we can actually rely on the failure reason.
// The failure transitions from ErrImagePull -> ImagePullBackOff
// Case II: Not enough resources are available. This is tricky. The total amount of
// resources requested could be beyond the capacity of the system; for this we will rely on configuration
// and hence input gates. We should not let bad requests that ask for a large amount of resources through.
// In case one makes it through, we will fail after a timeout.
func DemystifyPending(status v1.PodStatus) (pluginsCore.PhaseInfo, error) {
// Search over the different conditions in the status object. Note that the 'Pending' this function is
// demystifying is the 'phase' of the pod status. This is different from the PodReady condition type also used below.
for _, c := range status.Conditions {
switch c.Type {
case v1.PodScheduled:
if c.Status == v1.ConditionFalse {
// Waiting to be scheduled. This usually refers to inability to acquire resources.
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
}
case v1.PodReasonUnschedulable:
// We ignore the case in which we are unable to find resources on the cluster. This is because:
// - The resources may not be available at the moment, but may become available eventually.
// The pod scheduler will keep looking at this pod and trying to satisfy it.
//
// Pod status looks like this:
// message: '0/1 nodes are available: 1 Insufficient memory.'
// reason: Unschedulable
// status: "False"
// type: PodScheduled
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
case v1.PodReady:
if c.Status == v1.ConditionFalse {
// This happens when the image is having some problems. In the following example, K8s is having
// problems downloading an image. To confirm this, we have to iterate over all the container statuses and
// check whether some container has an image pull failure
// e.g.
// - lastProbeTime: null
// lastTransitionTime: 2018-12-18T00:57:30Z
// message: 'containers with unready status: [myapp-container]'
// reason: ContainersNotReady
// status: "False"
// type: Ready
//
// e.g. Container status
// - image: blah
// imageID: ""
// lastState: {}
// name: myapp-container
// ready: false
// restartCount: 0
// state:
// waiting:
// message: Back-off pulling image "blah"
// reason: ImagePullBackOff
for _, containerStatus := range status.ContainerStatuses {
if !containerStatus.Ready {
if containerStatus.State.Waiting != nil {
// There are a variety of reasons that can cause a pod to be in this waiting state.
// A waiting state may be legitimate while the image is being downloaded, the container is starting, or init containers are running
reason := containerStatus.State.Waiting.Reason
finalReason := fmt.Sprintf("%s|%s", c.Reason, reason)
finalMessage := fmt.Sprintf("%s|%s", c.Message, containerStatus.State.Waiting.Message)
switch reason {
case "ErrImagePull", "ContainerCreating", "PodInitializing":
// These are the only "reasons" seen while a pod is successfully being created, and hence it is in a
// waiting state
// Refer to https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet_pods.go
// and look for the default waiting states
// We also want to allow Image pulls to be retried, so ErrImagePull will be ignored
// as it eventually enters into ImagePullBackOff
// ErrImagePull -> Transitionary phase to ImagePullBackOff
// ContainerCreating -> Image is being downloaded
// PodInitializing -> Init containers are running
return pluginsCore.PhaseInfoInitializing(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("[%s]: %s", finalReason, finalMessage), &pluginsCore.TaskInfo{OccurredAt: &c.LastTransitionTime.Time}), nil
case "CreateContainerConfigError", "CreateContainerError":
// This happens if, for instance, the command given to the container is incorrect, i.e. it doesn't run
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
case "ImagePullBackOff":
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
default:
// Since we are not checking for all error states, we may end up perpetually
// in the queued state returned at the bottom of this function, until the Pod is reaped
// by K8s and we get elusive 'pod not found' errors.
// So by default, if the container is not waiting with the PodInitializing/ContainerCreating
// reasons, we will assume a failure reason and fail instantly
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoSystemRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
}
}
}
}
}
}
}
return pluginsCore.PhaseInfoQueued(time.Now(), pluginsCore.DefaultPhaseVersion, "Scheduling"), nil
}
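// Illustration (hypothetical, not part of pod_helper.go): a pod stuck in
// ImagePullBackOff demystifies to a retryable failure via the branch above.
// The status values are invented for the example.
func exampleDemystifyPending() {
	status := v1.PodStatus{
		Phase: v1.PodPending,
		Conditions: []v1.PodCondition{{
			Type:    v1.PodReady,
			Status:  v1.ConditionFalse,
			Reason:  "ContainersNotReady",
			Message: "containers with unready status: [myapp-container]",
		}},
		ContainerStatuses: []v1.ContainerStatus{{
			Name:  "myapp-container",
			Ready: false,
			State: v1.ContainerState{
				Waiting: &v1.ContainerStateWaiting{
					Reason:  "ImagePullBackOff",
					Message: "Back-off pulling image \"blah\"",
				},
			},
		}},
	}
	// Expect a retryable failure with reason "ContainersNotReady|ImagePullBackOff".
	phaseInfo, _ := DemystifyPending(status)
	fmt.Println(phaseInfo.Phase())
}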
func DemystifySuccess(status v1.PodStatus, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) {
for _, status := range append(
append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
if status.State.Terminated != nil && strings.Contains(status.State.Terminated.Reason, OOMKilled)
|
{
return pluginsCore.PhaseInfoRetryableFailure("OOMKilled",
"Pod reported success despite being OOMKilled", &info), nil
}
|
conditional_block
|
|
pod_helper.go
|
K8sPluginConfig().SchedulerName
}
podSpec.NodeSelector = utils.UnionMaps(podSpec.NodeSelector, config.GetK8sPluginConfig().DefaultNodeSelector)
if taskExecutionMetadata.IsInterruptible() {
podSpec.NodeSelector = utils.UnionMaps(podSpec.NodeSelector, config.GetK8sPluginConfig().InterruptibleNodeSelector)
}
if podSpec.Affinity == nil {
podSpec.Affinity = config.GetK8sPluginConfig().DefaultAffinity
}
}
func ToK8sPodSpec(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v1.PodSpec, error) {
task, err := tCtx.TaskReader().Read(ctx)
if err != nil {
logger.Warnf(ctx, "failed to read task information when trying to construct Pod, err: %s", err.Error())
return nil, err
}
if task.GetContainer() == nil {
logger.Errorf(ctx, "Default Pod creation logic works for default container in the task template only.")
return nil, fmt.Errorf("container not specified in task template")
}
c, err := ToK8sContainer(ctx, task.GetContainer(), task.Interface, template.Parameters{
Task: tCtx.TaskReader(),
Inputs: tCtx.InputReader(),
OutputPath: tCtx.OutputWriter(),
TaskExecMetadata: tCtx.TaskExecutionMetadata(),
})
if err != nil {
return nil, err
}
containers := []v1.Container{
*c,
}
pod := &v1.PodSpec{
Containers: containers,
}
UpdatePod(tCtx.TaskExecutionMetadata(), []v1.ResourceRequirements{c.Resources}, pod)
if err := AddCoPilotToPod(ctx, config.GetK8sPluginConfig().CoPilot, pod, task.GetInterface(), tCtx.TaskExecutionMetadata(), tCtx.InputReader(), tCtx.OutputWriter(), task.GetContainer().GetDataConfig()); err != nil {
return nil, err
}
return pod, nil
}
func BuildPodWithSpec(podSpec *v1.PodSpec) *v1.Pod {
pod := v1.Pod{
TypeMeta: v12.TypeMeta{
Kind: PodKind,
APIVersion: v1.SchemeGroupVersion.String(),
},
Spec: *podSpec,
}
return &pod
}
func BuildIdentityPod() *v1.Pod {
return &v1.Pod{
TypeMeta: v12.TypeMeta{
Kind: PodKind,
APIVersion: v1.SchemeGroupVersion.String(),
},
}
}
// Important considerations.
// A Pending status on a Pod can have various causes and can sometimes signal a problem.
// Case I: Pending because the image pull is failing and it is backing off.
// This could be transient, so we can actually rely on the failure reason.
// The failure transitions from ErrImagePull -> ImagePullBackOff
// Case II: Not enough resources are available. This is tricky. The total amount of
// resources requested could be beyond the capacity of the system; for this we will rely on configuration
// and hence input gates. We should not let bad requests that ask for a large amount of resources through.
// In case one makes it through, we will fail after a timeout.
func DemystifyPending(status v1.PodStatus) (pluginsCore.PhaseInfo, error) {
// Search over the different conditions in the status object. Note that the 'Pending' this function is
// demystifying is the 'phase' of the pod status. This is different from the PodReady condition type also used below.
for _, c := range status.Conditions {
switch c.Type {
case v1.PodScheduled:
if c.Status == v1.ConditionFalse {
// Waiting to be scheduled. This usually refers to inability to acquire resources.
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
}
case v1.PodReasonUnschedulable:
// We ignore the case in which we are unable to find resources on the cluster. This is because:
// - The resources may not be available at the moment, but may become available eventually.
// The pod scheduler will keep looking at this pod and trying to satisfy it.
//
// Pod status looks like this:
// message: '0/1 nodes are available: 1 Insufficient memory.'
// reason: Unschedulable
// status: "False"
// type: PodScheduled
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
case v1.PodReady:
if c.Status == v1.ConditionFalse {
// This happens when the image is having some problems. In the following example, K8s is having
// problems downloading an image. To confirm this, we have to iterate over all the container statuses and
// check whether some container has an image pull failure
// e.g.
// - lastProbeTime: null
// lastTransitionTime: 2018-12-18T00:57:30Z
// message: 'containers with unready status: [myapp-container]'
// reason: ContainersNotReady
// status: "False"
// type: Ready
//
// e.g. Container status
// - image: blah
// imageID: ""
// lastState: {}
// name: myapp-container
// ready: false
// restartCount: 0
// state:
// waiting:
// message: Back-off pulling image "blah"
// reason: ImagePullBackOff
for _, containerStatus := range status.ContainerStatuses {
if !containerStatus.Ready {
if containerStatus.State.Waiting != nil {
// There are a variety of reasons that can cause a pod to be in this waiting state.
// A waiting state may be legitimate while the image is being downloaded, the container is starting, or init containers are running
reason := containerStatus.State.Waiting.Reason
finalReason := fmt.Sprintf("%s|%s", c.Reason, reason)
finalMessage := fmt.Sprintf("%s|%s", c.Message, containerStatus.State.Waiting.Message)
switch reason {
case "ErrImagePull", "ContainerCreating", "PodInitializing":
// These are the only "reasons" seen while a pod is successfully being created, and hence it is in a
// waiting state
// Refer to https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet_pods.go
// and look for the default waiting states
// We also want to allow Image pulls to be retried, so ErrImagePull will be ignored
// as it eventually enters into ImagePullBackOff
// ErrImagePull -> Transitionary phase to ImagePullBackOff
// ContainerCreating -> Image is being downloaded
// PodInitializing -> Init containers are running
return pluginsCore.PhaseInfoInitializing(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("[%s]: %s", finalReason, finalMessage), &pluginsCore.TaskInfo{OccurredAt: &c.LastTransitionTime.Time}), nil
case "CreateContainerConfigError", "CreateContainerError":
// This happens if, for instance, the command given to the container is incorrect, i.e. it doesn't run
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
case "ImagePullBackOff":
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
default:
// Since we are not checking for all error states, we may end up perpetually
// in the queued state returned at the bottom of this function, until the Pod is reaped
// by K8s and we get elusive 'pod not found' errors.
// So by default, if the container is not waiting with the PodInitializing/ContainerCreating
// reasons, we will assume a failure reason and fail instantly
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoSystemRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
}
}
}
}
}
}
}
return pluginsCore.PhaseInfoQueued(time.Now(), pluginsCore.DefaultPhaseVersion, "Scheduling"), nil
}
func
|
(status v1.PodStatus, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) {
for _, status := range append(
append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
if status.State.Terminated != nil && strings.Contains(status.State.Terminated.Reason, OOMKilled) {
return
|
DemystifySuccess
|
identifier_name
|
pod_helper.go
|
func BuildPodWithSpec(podSpec *v1.PodSpec) *v1.Pod {
pod := v1.Pod{
TypeMeta: v12.TypeMeta{
Kind: PodKind,
APIVersion: v1.SchemeGroupVersion.String(),
},
Spec: *podSpec,
}
return &pod
}
func BuildIdentityPod() *v1.Pod {
return &v1.Pod{
TypeMeta: v12.TypeMeta{
Kind: PodKind,
APIVersion: v1.SchemeGroupVersion.String(),
},
}
}
// Important considerations.
// A Pending status on a Pod can have various causes and can sometimes signal a problem.
// Case I: Pending because the image pull is failing and it is backing off.
// This could be transient, so we can actually rely on the failure reason.
// The failure transitions from ErrImagePull -> ImagePullBackOff
// Case II: Not enough resources are available. This is tricky. The total amount of
// resources requested could be beyond the capacity of the system; for this we will rely on configuration
// and hence input gates. We should not let bad requests that ask for a large amount of resources through.
// In case one makes it through, we will fail after a timeout.
func DemystifyPending(status v1.PodStatus) (pluginsCore.PhaseInfo, error) {
// Search over the different conditions in the status object. Note that the 'Pending' this function is
// demystifying is the 'phase' of the pod status. This is different from the PodReady condition type also used below.
for _, c := range status.Conditions {
switch c.Type {
case v1.PodScheduled:
if c.Status == v1.ConditionFalse {
// Waiting to be scheduled. This usually refers to inability to acquire resources.
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
}
case v1.PodReasonUnschedulable:
// We ignore the case in which we are unable to find resources on the cluster. This is because:
// - The resources may not be available at the moment, but may become available eventually.
// The pod scheduler will keep looking at this pod and trying to satisfy it.
//
// Pod status looks like this:
// message: '0/1 nodes are available: 1 Insufficient memory.'
// reason: Unschedulable
// status: "False"
// type: PodScheduled
return pluginsCore.PhaseInfoQueued(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message)), nil
case v1.PodReady:
if c.Status == v1.ConditionFalse {
// This happens when the image is having some problems. In the following example, K8s is having
// problems downloading an image. To confirm this, we have to iterate over all the container statuses and
// check whether some container has an image pull failure
// e.g.
// - lastProbeTime: null
// lastTransitionTime: 2018-12-18T00:57:30Z
// message: 'containers with unready status: [myapp-container]'
// reason: ContainersNotReady
// status: "False"
// type: Ready
//
// e.g. Container status
// - image: blah
// imageID: ""
// lastState: {}
// name: myapp-container
// ready: false
// restartCount: 0
// state:
// waiting:
// message: Back-off pulling image "blah"
// reason: ImagePullBackOff
for _, containerStatus := range status.ContainerStatuses {
if !containerStatus.Ready {
if containerStatus.State.Waiting != nil {
// There are a variety of reasons that can cause a pod to be in this waiting state.
// A waiting state may be legitimate while the image is being downloaded, the container is starting, or init containers are running
reason := containerStatus.State.Waiting.Reason
finalReason := fmt.Sprintf("%s|%s", c.Reason, reason)
finalMessage := fmt.Sprintf("%s|%s", c.Message, containerStatus.State.Waiting.Message)
switch reason {
case "ErrImagePull", "ContainerCreating", "PodInitializing":
// These are the only "reasons" seen while a pod is successfully being created, and hence it is in a
// waiting state
// Refer to https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet_pods.go
// and look for the default waiting states
// We also want to allow Image pulls to be retried, so ErrImagePull will be ignored
// as it eventually enters into ImagePullBackOff
// ErrImagePull -> Transitionary phase to ImagePullBackOff
// ContainerCreating -> Image is being downloaded
// PodInitializing -> Init containers are running
return pluginsCore.PhaseInfoInitializing(c.LastTransitionTime.Time, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("[%s]: %s", finalReason, finalMessage), &pluginsCore.TaskInfo{OccurredAt: &c.LastTransitionTime.Time}), nil
case "CreateContainerConfigError", "CreateContainerError":
// This happens if, for instance, the command given to the container is incorrect, i.e. it doesn't run
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
case "ImagePullBackOff":
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
default:
// Since we are not checking for all error states, we may end up perpetually
// in the queued state returned at the bottom of this function, until the Pod is reaped
// by K8s and we get elusive 'pod not found' errors.
// So by default, if the container is not waiting with the PodInitializing/ContainerCreating
// reasons, we will assume a failure reason and fail instantly
t := c.LastTransitionTime.Time
return pluginsCore.PhaseInfoSystemRetryableFailure(finalReason, finalMessage, &pluginsCore.TaskInfo{
OccurredAt: &t,
}), nil
}
}
}
}
}
}
}
return pluginsCore.PhaseInfoQueued(time.Now(), pluginsCore.DefaultPhaseVersion, "Scheduling"), nil
}
func DemystifySuccess(status v1.PodStatus, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) {
for _, status := range append(
append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
if status.State.Terminated != nil && strings.Contains(status.State.Terminated.Reason, OOMKilled) {
return pluginsCore.PhaseInfoRetryableFailure("OOMKilled",
"Pod reported success despite being OOMKilled", &info), nil
}
}
return pluginsCore.PhaseInfoSuccess(&info), nil
}
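// Illustration (hypothetical, not part of pod_helper.go): a pod that reports
// success but whose container terminated with an OOMKilled reason is demoted
// to a retryable failure by DemystifySuccess above. Values are invented.
func exampleDemystifySuccess() {
	oom := v1.PodStatus{
		Phase: v1.PodSucceeded,
		ContainerStatuses: []v1.ContainerStatus{{
			Name: "primary",
			State: v1.ContainerState{
				Terminated: &v1.ContainerStateTerminated{ExitCode: 0, Reason: OOMKilled},
			},
		}},
	}
	p, _ := DemystifySuccess(oom, pluginsCore.TaskInfo{})
	fmt.Println(p.Phase()) // retryable failure, reason "OOMKilled"
}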
func DeterminePrimaryContainerPhase(primaryContainerName string, statuses []v1.ContainerStatus, info *pluginsCore.TaskInfo) pluginsCore.PhaseInfo {
for _, s := range statuses {
if s.Name == primaryContainerName {
if s.State.Waiting != nil || s.State.Running != nil {
return pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, info)
}
if s.State.Terminated != nil {
if s.State.Terminated.ExitCode != 0 {
return pluginsCore.PhaseInfoRetryableFailure(
s.State.Terminated.Reason, s.State.Terminated.Message, info)
}
return pluginsCore.PhaseInfoSuccess(info)
}
}
}
// If for some reason we can't find the primary container, always just return a permanent failure
return pluginsCore.PhaseInfoFailure("PrimaryContainerMissing",
fmt.Sprintf("Primary container [%s] not found in pod's container statuses", primaryContainerName), info)
}
func ConvertPodFailureToError(status v1.PodStatus) (code, message string)
|
{
code = "UnknownError"
message = "Pod failed. No message received from kubernetes."
if len(status.Reason) > 0 {
code = status.Reason
}
if len(status.Message) > 0 {
message = status.Message
}
for _, c := range append(
append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
var containerState v1.ContainerState
if c.LastTerminationState.Terminated != nil {
containerState = c.LastTerminationState
} else if c.State.Terminated != nil {
containerState = c.State
}
if containerState.Terminated != nil {
|
identifier_body
|
|
searchByName.js
|
vm.search = search;
vm.searchParamsObject = {};
// for the iOS app an extra iOS CSS class is needed in the bar-header
vm.isIosApp = (CONFIG.environment == 'app' && CONFIG.deviceOs == 'iOS') ? true : false;
vm.urlPrefix = CONFIG.urlPrefix;
vm.isTablet = (CONFIG.deviceType == 'tablet') ? true : false;
vm.hideNavBar = (CONFIG.deviceOrientation == 'landscape' && vm.isTablet) ? true : false;
vm.removeFocusFromInput = removeFocusFromInput;
vm.checkKey = checkKey;
vm.bestMatchSuggest = {};
vm.clearTextInput = clearTextInput;
vm.lastSearchPersonSuggest = false;
////////////
$scope.$on('$ionicView.afterEnter', function() {
// set last search object
CacheFactory.getFromCache('searchCache').then(function(searchCache) {
vm.lastSearchPersonSuggest = CacheFactory.lastSearchPersonSuggest(searchCache);
}, function() {
// empty cache
vm.lastSearchPersonSuggest = false;
});
LoaderFactory.hideLoader();
// add event listener
OrientationchangeFactory.initListener();
// set back button
window.setTimeout(function() {
JamHelperFactory.setBackButton('burger');
}, CONFIG.backButtonDelay);
// GATRACKING
AnalyticsHelper.trackPageview('/namenssuche/');
// set canonical
JamHelperFactory.setCanonical('https://www.jameda.de/empfehlen/', true);
// reset back button cache
JamHelperFactory.setIntoCache('profileBackParams', {});
});
$scope.$on('$ionicView.beforeLeave', function() {
JamHelperFactory.resetBackButton();
});
function checkKey(type) {
// a key was pressed (13 = Enter, 27 = Escape); note this relies on the global window.event
if (event.keyCode == 13 || event.keyCode == 27) {
hideKeyboard();
if (type == 'what') {
vm.setChosenSuggestItem({
searchType: vm.bestMatchSuggest.searchType,
inputItem: vm.bestMatchSuggest.was,
selectedItem: vm.bestMatchSuggest.was,
gruppe_fach_param: vm.bestMatchSuggest.gruppe_fach_param,
was_sel: vm.bestMatchSuggest.was_sel
});
}
}
}
function setBestMatchSuggest(type, data, input) {
vm.bestMatchSuggest = {};
if (type == 'what') {
if (data && typeof data[0] !== 'undefined') {
// find the best matches
vm.bestMatchSuggest.searchType = 'what';
vm.bestMatchSuggest.was_sel = 1;
if (data[0].header == 'Fachbereiche') {
vm.bestMatchSuggest.was = data[0].list[0].term;
vm.bestMatchSuggest.gruppe_fach_param = data[0].list[0].select;
} else if (data[0].header == 'Namen') {
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
}
}
if ((typeof data === 'undefined' || data == '') && input != '') {
vm.bestMatchSuggest.searchType = 'what';
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
vm.bestMatchSuggest.was_sel = 1;
}
}
}
function removeFocusFromInput() {
hideKeyboard();
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
function hideKeyboard() {
ionic.DomUtil.blurAll();
}
function clearTextInput() {
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// clear the input
vm.searchInput.what = "";
}
function setScrollHeight() {
$timeout(function () {
var suggestScrollArea = $('ion-nav-view[name="menuContent"] ion-view[nav-view="active"] #suggest-scroll-area');
// Only on android tablets
if (vm.isTablet && vm.deviceOs == 'Android') {
var tmpHeight = $('ion-view[nav-view="active"] .jam-suggest-box-tablet').height();
if (tmpHeight > CONFIG.windowHeight) {
suggestScrollArea.height(CONFIG.windowHeight - 75);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 124);
}
} else {
if (vm.isTablet) {
suggestScrollArea.height(CONFIG.windowHeight - 124);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 75);
}
}
}, 100);
}
function controlInputFieldPosition(type) {
/* */
if(type == 'what') {
// GATRACKING
if (vm.showWhereInputField) {
AnalyticsHelper.trackEvent('Namenssuche - Klicks', 'Was Wen Suchleiste geklickt');
}
$ionicNavBarDelegate.showBar(false);
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = true;
// hide the 'where' input
vm.showWhereInputField = false;
vm.showWhereSuggest = false;
// show the cancel button
vm.showWhatCancel = true;
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// fetch suggest data for the current input
if(vm.searchInputTemp.what) {
vm.getSuggestData(type);
}
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
if(type == 'cancelWhat' || type == 'cancelWhere') {
if (CONFIG.deviceType == 'phone' || CONFIG.deviceOrientation != 'landscape') {
$ionicNavBarDelegate.showBar(true);
}
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
// whenever the input has changed and cancel was clicked,
// write the previous input back
if(type == 'cancelWhat') {
if(vm.searchInput.what != vm.searchInputTemp.what) {
vm.searchInput.what = vm.searchInputTemp.what;
}
}
if(type == 'cancelWhere') {
if(vm.searchInput.where != vm.searchInputTemp.where) {
vm.searchInput.where = vm.searchInputTemp.where;
}
}
}
if(type == 'all') {
$ionic
|
nction setSearchParamsObject(params) {
// search via 'what' / 'who'
if (params.searchType == "what") {
vm.searchParamsObject.was = params.selectedItem;
vm.searchParamsObject.was_i = params.inputItem;
vm.searchParamsObject.was_sel = (typeof params.was_sel !== 'undefined' && params.was_sel) ? params.was_sel : 0;
if (typeof params.gruppe_fach_param != 'undefined') {
// use a separate variable to avoid shadowing the 'params' argument
var paramPairs = params.gruppe_fach_param.split("&");
for (var i=0; i<paramPairs.length; i++) {
var pair = paramPairs[i].split("=");
vm.searchParamsObject[pair[0]] = pair[1];
}
vm.searchParamsObject.namen = '';
} else if (typeof params.selectedItem != 'undefined' && typeof params.deeplink == 'undefined') {
vm.searchParamsObject.namen = params.selectedItem;
vm.searchParamsObject.gruppe_fach_param = '';
} else if (typeof params.deeplink != 'undefined' && params.deeplink != '' && typeof params.selectedItem != 'undefined') {
var customerId = JamHelperFactory.strReplace('/profil/', '', params.deeplink);
customerId = JamHelperFactory.strReplace('?beta=1', '', customerId);
var customerIdClean = customerId.split("_")[0];
if (typeof params.multi !== 'undefined' && params.multi.length > 1) {
// mehrere Stand
|
NavBarDelegate.showBar(true);
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
}
}
fu
|
conditional_block
|
searchByName.js
|
.trackPageview('/namenssuche/');
// set canonical
JamHelperFactory.setCanonical('https://www.jameda.de/empfehlen/', true);
// reset back button cache
JamHelperFactory.setIntoCache('profileBackParams', {});
});
$scope.$on('$ionicView.beforeLeave', function() {
JamHelperFactory.resetBackButton();
});
function checkKey(type) {
// a key was pressed (13 = Enter, 27 = Escape); note this relies on the global window.event
if (event.keyCode == 13 || event.keyCode == 27) {
hideKeyboard();
if (type == 'what') {
vm.setChosenSuggestItem({
searchType: vm.bestMatchSuggest.searchType,
inputItem: vm.bestMatchSuggest.was,
selectedItem: vm.bestMatchSuggest.was,
gruppe_fach_param: vm.bestMatchSuggest.gruppe_fach_param,
was_sel: vm.bestMatchSuggest.was_sel
});
}
}
}
function setBestMatchSuggest(type, data, input) {
vm.bestMatchSuggest = {};
if (type == 'what') {
if (data && typeof data[0] !== 'undefined') {
// find the best matches
vm.bestMatchSuggest.searchType = 'what';
vm.bestMatchSuggest.was_sel = 1;
if (data[0].header == 'Fachbereiche') {
vm.bestMatchSuggest.was = data[0].list[0].term;
vm.bestMatchSuggest.gruppe_fach_param = data[0].list[0].select;
} else if (data[0].header == 'Namen') {
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
}
}
if ((typeof data === 'undefined' || data == '') && input != '') {
vm.bestMatchSuggest.searchType = 'what';
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
vm.bestMatchSuggest.was_sel = 1;
}
}
}
function removeFocusFromInput() {
hideKeyboard();
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
function hideKeyboard() {
ionic.DomUtil.blurAll();
}
function clearTextInput() {
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// clear the input
vm.searchInput.what = "";
}
function setScrollHeight() {
$timeout(function () {
var suggestScrollArea = $('ion-nav-view[name="menuContent"] ion-view[nav-view="active"] #suggest-scroll-area');
// Only on android tablets
if (vm.isTablet && vm.deviceOs == 'Android') {
var tmpHeight = $('ion-view[nav-view="active"] .jam-suggest-box-tablet').height();
if (tmpHeight > CONFIG.windowHeight) {
suggestScrollArea.height(CONFIG.windowHeight - 75);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 124);
}
} else {
if (vm.isTablet) {
suggestScrollArea.height(CONFIG.windowHeight - 124);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 75);
}
}
}, 100);
}
function controlInputFieldPosition(type) {
/* */
if(type == 'what') {
// GATRACKING
if (vm.showWhereInputField) {
AnalyticsHelper.trackEvent('Namenssuche - Klicks', 'Was Wen Suchleiste geklickt');
}
$ionicNavBarDelegate.showBar(false);
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = true;
// hide the 'where' input
vm.showWhereInputField = false;
vm.showWhereSuggest = false;
// show the cancel button
vm.showWhatCancel = true;
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// fetch suggest data for the current input
if(vm.searchInputTemp.what) {
vm.getSuggestData(type);
}
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
if(type == 'cancelWhat' || type == 'cancelWhere') {
if (CONFIG.deviceType == 'phone' || CONFIG.deviceOrientation != 'landscape') {
$ionicNavBarDelegate.showBar(true);
}
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
// whenever the input has changed and cancel was clicked,
// write the previous input back
if(type == 'cancelWhat') {
if(vm.searchInput.what != vm.searchInputTemp.what) {
vm.searchInput.what = vm.searchInputTemp.what;
}
}
if(type == 'cancelWhere') {
if(vm.searchInput.where != vm.searchInputTemp.where) {
vm.searchInput.where = vm.searchInputTemp.where;
}
}
}
if(type == 'all') {
$ionicNavBarDelegate.showBar(true);
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
}
}
function setSearchParamsObject(params) {
// search via 'what' / 'who'
if (params.searchType == "what") {
vm.searchParamsObject.was = params.selectedItem;
vm.searchParamsObject.was_i = params.inputItem;
vm.searchParamsObject.was_sel = (typeof params.was_sel !== 'undefined' && params.was_sel) ? params.was_sel : 0;
if (typeof params.gruppe_fach_param != 'undefined') {
// use a separate variable to avoid shadowing the 'params' argument
var paramPairs = params.gruppe_fach_param.split("&");
for (var i=0; i<paramPairs.length; i++) {
var pair = paramPairs[i].split("=");
vm.searchParamsObject[pair[0]] = pair[1];
}
vm.searchParamsObject.namen = '';
} else if (typeof params.selectedItem != 'undefined' && typeof params.deeplink == 'undefined') {
vm.searchParamsObject.namen = params.selectedItem;
vm.searchParamsObject.gruppe_fach_param = '';
} else if (typeof params.deeplink != 'undefined' && params.deeplink != '' && typeof params.selectedItem != 'undefined') {
var customerId = JamHelperFactory.strReplace('/profil/', '', params.deeplink);
customerId = JamHelperFactory.strReplace('?beta=1', '', customerId);
var customerIdClean = customerId.split("_")[0];
if (typeof params.multi !== 'undefined' && params.multi.length > 1) {
// multiple locations, route directly to the selection page
$state.go('searchResultListSub', {
refId: customerIdClean,
isSearch: true
});
} else {
// route directly to the profile, example: /profil/uebersicht/45897203100_2/
$state.go('profile', {
fullRefId: customerId,
path: 'profil',
backLinkType: 'deeplink',
isSearch: true
});
}
}
}
// invoke the person search from the cache
if (params.searchType == "personSearchFromCache") {
// profile
if (typeof params.objFromCache.what_name_nice.refId !== 'undefined' && params.objFromCache.what_name_nice.refId != '') {
// route directly to the profile
$state.go('profile', {
fullRefId: params.objFromCache.what_name_nice.refId,
path: 'profil',
backLinkType: 'deeplink',
isSearch: true
});
}
}
}
function setChosenSuggestItem(params) {
// GATRACKING
AnalyticsHelper.trackEvent('Namenssuche - Klicks', 'Namenssuggest geklickt');
|
/* */
setSearchParamsObject(params);
|
random_line_split
|
|
searchByName.js
|
vm.search = search;
vm.searchParamsObject = {};
// for the iOS app an extra iOS CSS class is needed in the bar-header
vm.isIosApp = (CONFIG.environment == 'app' && CONFIG.deviceOs == 'iOS') ? true : false;
vm.urlPrefix = CONFIG.urlPrefix;
vm.isTablet = (CONFIG.deviceType == 'tablet') ? true : false;
vm.hideNavBar = (CONFIG.deviceOrientation == 'landscape' && vm.isTablet) ? true : false;
vm.removeFocusFromInput = removeFocusFromInput;
vm.checkKey = checkKey;
vm.bestMatchSuggest = {};
vm.clearTextInput = clearTextInput;
vm.lastSearchPersonSuggest = false;
////////////
$scope.$on('$ionicView.afterEnter', function() {
// set last search object
CacheFactory.getFromCache('searchCache').then(function(searchCache) {
vm.lastSearchPersonSuggest = CacheFactory.lastSearchPersonSuggest(searchCache);
}, function() {
// empty cache
vm.lastSearchPersonSuggest = false;
});
LoaderFactory.hideLoader();
// add event listener
OrientationchangeFactory.initListener();
// set back button
window.setTimeout(function() {
JamHelperFactory.setBackButton('burger');
}, CONFIG.backButtonDelay);
// GATRACKING
AnalyticsHelper.trackPageview('/namenssuche/');
// set canonical
JamHelperFactory.setCanonical('https://www.jameda.de/empfehlen/', true);
// reset back button cache
JamHelperFactory.setIntoCache('profileBackParams', {});
});
$scope.$on('$ionicView.beforeLeave', function() {
JamHelperFactory.resetBackButton();
});
function checkKey(type) {
// a key was pressed (13 = Enter, 27 = Escape); note this relies on the global window.event
if (event.keyCode == 13 || event.keyCode == 27) {
hideKeyboard();
if (type == 'what') {
vm.setChosenSuggestItem({
searchType: vm.bestMatchSuggest.searchType,
inputItem: vm.bestMatchSuggest.was,
selectedItem: vm.bestMatchSuggest.was,
gruppe_fach_param: vm.bestMatchSuggest.gruppe_fach_param,
was_sel: vm.bestMatchSuggest.was_sel
});
}
}
}
function setBestMatchSuggest(type, data, input) {
vm.bestMatchSuggest = {};
if (type == 'what') {
if (data && typeof data[0] !== 'undefined') {
// find the best matches
vm.bestMatchSuggest.searchType = 'what';
vm.bestMatchSuggest.was_sel = 1;
if (data[0].header == 'Fachbereiche') {
vm.bestMatchSuggest.was = data[0].list[0].term;
vm.bestMatchSuggest.gruppe_fach_param = data[0].list[0].select;
} else if (data[0].header == 'Namen') {
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
}
}
if ((typeof data === 'undefined' || data == '') && input != '') {
vm.bestMatchSuggest.searchType = 'what';
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
vm.bestMatchSuggest.was_sel = 1;
}
}
}
function removeFocusFromInput() {
hideKeyboard();
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
function hideKeyboard() {
ionic.DomUtil.blurAll();
}
function clearTextInput() {
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// clear the input
vm.searchInput.what = "";
}
function setScro
|
$timeout(function () {
var suggestScrollArea = $('ion-nav-view[name="menuContent"] ion-view[nav-view="active"] #suggest-scroll-area');
// Only on android tablets
if (vm.isTablet && vm.deviceOs == 'Android') {
var tmpHeight = $('ion-view[nav-view="active"] .jam-suggest-box-tablet').height();
if (tmpHeight > CONFIG.windowHeight) {
suggestScrollArea.height(CONFIG.windowHeight - 75);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 124);
}
} else {
if (vm.isTablet) {
suggestScrollArea.height(CONFIG.windowHeight - 124);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 75);
}
}
}, 100);
}
function controlInputFieldPosition(type) {
/* */
if(type == 'what') {
// GATRACKING
if (vm.showWhereInputField) {
AnalyticsHelper.trackEvent('Namenssuche - Klicks', 'Was Wen Suchleiste geklickt');
}
$ionicNavBarDelegate.showBar(false);
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = true;
// hide the 'where' input
vm.showWhereInputField = false;
vm.showWhereSuggest = false;
// show the cancel button
vm.showWhatCancel = true;
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// fetch suggest data for the current input
if(vm.searchInputTemp.what) {
vm.getSuggestData(type);
}
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
if(type == 'cancelWhat' || type == 'cancelWhere') {
if (CONFIG.deviceType == 'phone' || CONFIG.deviceOrientation != 'landscape') {
$ionicNavBarDelegate.showBar(true);
}
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
// whenever the input has changed and cancel was clicked,
// write the previous input back
if(type == 'cancelWhat') {
if(vm.searchInput.what != vm.searchInputTemp.what) {
vm.searchInput.what = vm.searchInputTemp.what;
}
}
if(type == 'cancelWhere') {
if(vm.searchInput.where != vm.searchInputTemp.where) {
vm.searchInput.where = vm.searchInputTemp.where;
}
}
}
if(type == 'all') {
$ionicNavBarDelegate.showBar(true);
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
}
}
function setSearchParamsObject(params) {
// search via 'what' / 'who'
if (params.searchType == "what") {
vm.searchParamsObject.was = params.selectedItem;
vm.searchParamsObject.was_i = params.inputItem;
vm.searchParamsObject.was_sel = (typeof params.was_sel !== 'undefined' && params.was_sel) ? params.was_sel : 0;
if (typeof params.gruppe_fach_param != 'undefined') {
// use a separate variable to avoid shadowing the 'params' argument
var paramPairs = params.gruppe_fach_param.split("&");
for (var i=0; i<paramPairs.length; i++) {
var pair = paramPairs[i].split("=");
vm.searchParamsObject[pair[0]] = pair[1];
}
vm.searchParamsObject.namen = '';
} else if (typeof params.selectedItem != 'undefined' && typeof params.deeplink == 'undefined') {
vm.searchParamsObject.namen = params.selectedItem;
vm.searchParamsObject.gruppe_fach_param = '';
} else if (typeof params.deeplink != 'undefined' && params.deeplink != '' && typeof params.selectedItem != 'undefined') {
var customerId = JamHelperFactory.strReplace('/profil/', '', params.deeplink);
customerId = JamHelperFactory.strReplace('?beta=1', '', customerId);
var customerIdClean = customerId.split("_")[0];
if (typeof params.multi !== 'undefined' && params.multi.length > 1) {
// mehrere
|
llHeight() {
|
identifier_name
|
searchByName.js
|
';
vm.bestMatchSuggest.was_sel = 1;
if (data[0].header == 'Fachbereiche') {
vm.bestMatchSuggest.was = data[0].list[0].term;
vm.bestMatchSuggest.gruppe_fach_param = data[0].list[0].select;
} else if (data[0].header == 'Namen') {
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
}
}
if ((typeof data === 'undefined' || data == '') && input != '') {
vm.bestMatchSuggest.searchType = 'what';
vm.bestMatchSuggest.was = input.what;
vm.bestMatchSuggest.namen = input.what;
vm.bestMatchSuggest.was_sel = 1;
}
}
}
function removeFocusFromInput() {
hideKeyboard();
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
function hideKeyboard() {
ionic.DomUtil.blurAll();
}
function clearTextInput() {
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// clear the input
vm.searchInput.what = "";
}
function setScrollHeight() {
$timeout(function () {
var suggestScrollArea = $('ion-nav-view[name="menuContent"] ion-view[nav-view="active"] #suggest-scroll-area');
// Only on android tablets
if (vm.isTablet && vm.deviceOs == 'Android') {
var tmpHeight = $('ion-view[nav-view="active"] .jam-suggest-box-tablet').height();
if (tmpHeight > CONFIG.windowHeight) {
suggestScrollArea.height(CONFIG.windowHeight - 75);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 124);
}
} else {
if (vm.isTablet) {
suggestScrollArea.height(CONFIG.windowHeight - 124);
} else {
suggestScrollArea.height(CONFIG.windowHeight - 75);
}
}
}, 100);
}
function controlInputFieldPosition(type) {
/* */
if(type == 'what') {
// GATRACKING
if (vm.showWhereInputField) {
AnalyticsHelper.trackEvent('Namenssuche - Klicks', 'Was Wen Suchleiste geklickt');
}
$ionicNavBarDelegate.showBar(false);
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = true;
// hide the 'where' input
vm.showWhereInputField = false;
vm.showWhereSuggest = false;
// show the cancel button
vm.showWhatCancel = true;
// temporarily store the last input
vm.searchInputTemp.what = vm.searchInput.what;
// fetch suggest data for the current input
if(vm.searchInputTemp.what) {
vm.getSuggestData(type);
}
// set the height of the scrollable area for the suggest list
setScrollHeight();
}
if(type == 'cancelWhat' || type == 'cancelWhere') {
if (CONFIG.deviceType == 'phone' || CONFIG.deviceOrientation != 'landscape') {
$ionicNavBarDelegate.showBar(true);
}
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
// whenever the input has changed and cancel was clicked,
// write the previous input back
if(type == 'cancelWhat') {
if(vm.searchInput.what != vm.searchInputTemp.what) {
vm.searchInput.what = vm.searchInputTemp.what;
}
}
if(type == 'cancelWhere') {
if(vm.searchInput.where != vm.searchInputTemp.where) {
vm.searchInput.where = vm.searchInputTemp.where;
}
}
}
if(type == 'all') {
$ionicNavBarDelegate.showBar(true);
// show the 'where' input
vm.showWhereInputField = true;
vm.showWhereSuggest = false;
// show the 'what' input
vm.showWhatInputField = true;
vm.showWhatSuggest = false;
// hide the cancel buttons
vm.showWhatCancel = false;
vm.showWhereCancel = false;
}
}
function setSearchParamsObject(params) {
// search via 'what' / 'who'
if (params.searchType == "what") {
vm.searchParamsObject.was = params.selectedItem;
vm.searchParamsObject.was_i = params.inputItem;
vm.searchParamsObject.was_sel = (typeof params.was_sel !== 'undefined' && params.was_sel) ? params.was_sel : 0;
if (typeof params.gruppe_fach_param != 'undefined') {
// use a separate variable to avoid shadowing the 'params' argument
var paramPairs = params.gruppe_fach_param.split("&");
for (var i=0; i<paramPairs.length; i++) {
var pair = paramPairs[i].split("=");
vm.searchParamsObject[pair[0]] = pair[1];
}
vm.searchParamsObject.namen = '';
} else if (typeof params.selectedItem != 'undefined' && typeof params.deeplink == 'undefined') {
vm.searchParamsObject.namen = params.selectedItem;
vm.searchParamsObject.gruppe_fach_param = '';
} else if (typeof params.deeplink != 'undefined' && params.deeplink != '' && typeof params.selectedItem != 'undefined') {
var customerId = JamHelperFactory.strReplace('/profil/', '', params.deeplink);
customerId = JamHelperFactory.strReplace('?beta=1', '', customerId);
var customerIdClean = customerId.split("_")[0];
if (typeof params.multi !== 'undefined' && params.multi.length > 1) {
// multiple locations, route directly to the selection page
$state.go('searchResultListSub', {
refId: customerIdClean,
isSearch: true
});
} else {
// route directly to the profile, example: /profil/uebersicht/45897203100_2/
$state.go('profile', {
fullRefId: customerId,
path: 'profil',
backLinkType: 'deeplink',
isSearch: true
});
}
}
}
// invoke the person search from the cache
if (params.searchType == "personSearchFromCache") {
// profile
if (typeof params.objFromCache.what_name_nice.refId !== 'undefined' && params.objFromCache.what_name_nice.refId != '') {
// route directly to the profile
$state.go('profile', {
fullRefId: params.objFromCache.what_name_nice.refId,
path: 'profil',
backLinkType: 'deeplink',
isSearch: true
});
}
}
}
function setChosenSuggestItem(params) {
// GATRACKING
AnalyticsHelper.trackEvent('Namenssuche - Klicks', 'Namenssuggest geklickt');
/* */
setSearchParamsObject(params);
if(params.searchType == 'what') {
vm.searchInput.what = params.selectedItem;
}
if(params.searchType == 'where') {
vm.searchInput.where = params.selectedItem;
}
// show everything
controlInputFieldPosition('all');
}
function getSuggestData(type) {
/* */
// fetch data for the suggest list
SuggestFactory.getSuggestDataByName(vm.searchInput).then(
function(data) {
data = data.data;
setBestMatchSuggest(type, data.suggests, vm.searchInput);
vm.searchInput.results = data.suggests;
setScrollHeight();
}
);
// show the 'what' suggest
if(type == 'what') {
vm.showWhatSuggest = true;
vm.showWhereSuggest = false;
}
// show the 'where' suggest
if(type == 'where') {
vm.showWhatSuggest = false;
vm.showWhereSuggest = true;
}
}
function clearSearch(type) {
/* */
if(type == 'cancelWhat') {
vm.searchInput.what = '';
}
if(type == 'cancelWhere') {
vm.searchInput.where = '';
}
}
function search() {
$state.go
|
('searchResultList', vm.searchParamsObject);
}
activate(
|
identifier_body
|
|
time.rs
|
64 / NSEC_PER_MSEC)
}
///Gets a representation of this timespec as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timespec, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_nsec = 0;
}
}
///A structure that contains the number of seconds and microseconds since an epoch.
///
///If in doubt, assume we're talking about the UNIX epoch.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timeval {
///The number of seconds contained in this timeval
pub tv_sec: ::time_t,
///The number of microseconds contained in this timeval
pub tv_usec: ::suseconds_t
}
impl timeval {
///Creates a new timeval with both values defaulting to zero
pub fn new() -> timeval {
timeval { tv_sec: 0, tv_usec: 0 }
}
///Creates a new timeval from the specified number of seconds
pub fn from_seconds(seconds: ::time_t) -> timeval {
timeval { tv_sec: seconds, tv_usec: 0 }
}
///Gets a representation of this timeval as a number of milliseconds
pub fn to_milliseconds(&self) -> i64 {
(self.tv_sec as i64 * MSEC_PER_SEC) + (self.tv_usec as i64 / USEC_PER_MSEC)
}
///Gets a representation of this timeval as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timeval, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_usec = 0;
}
}
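For reference, the integer conversions implemented by to_milliseconds and to_seconds above amount to the following, with truncating (floor) division throughout; the worked values are illustrative only:

\[
\mathrm{ms} = t_{\mathrm{sec}} \cdot 1000 + \left\lfloor \frac{t_{\mathrm{frac}}}{d} \right\rfloor,
\qquad
d = \begin{cases} 10^{6} & \text{timespec } (t_{\mathrm{frac}} = \texttt{tv\_nsec}) \\ 10^{3} & \text{timeval } (t_{\mathrm{frac}} = \texttt{tv\_usec}) \end{cases},
\qquad
\mathrm{s} = \left\lfloor \frac{\mathrm{ms}}{1000} \right\rfloor
\]

For example, a timeval with tv_sec = 1 and tv_usec = 2500 gives 1000 + 2 = 1002 ms, which to_seconds truncates back to 1 s.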
///A structure containing information on the time-based location of a timezone
///
///Please note that this does not include the name or country code, only the minutes west of Greenwich and the type of DST correction
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timezone {
///The number of minutes west of Greenwich
pub tz_minuteswest: ::c_int,
///The type of Daylight Saving Time correction
pub tz_dsttime: ::c_int
}
//Names of the interval timers
///An interval timer that decrements in real time
///
///On expiration, a SIGALRM is delivered
pub const ITIMER_REAL: ::c_int = 0;
///An interval timer that decrements only when the process is executing.
///
///On expiration, a SIGVTALRM is delivered
pub const ITIMER_VIRTUAL: ::c_int = 1;
///Decrements both while the process is executing and while the system is executing on behalf of the process
///
///This is usually used to profile kernel-space and user-space concurrently.
///
///If coupled with ITIMER_VIRTUAL, you can separate the two values - What is left when ITIMER_VIRTUAL's value is removed is kernel time
pub const ITIMER_PROF: ::c_int = 2;
///An interval timer based on a `timespec`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerspec {
///The period of time this timer should run for (Need to verify)
pub it_interval: timespec,
///The amount of time left until expiration (Need to verify)
pub it_value: timespec
}
///An interval timer based on a `timeval`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerval {
|
///The amount of time left until expiration (Need to verify)
pub it_value: timeval
}
///A system-wide clock that measures time from the "real world"
///
///This clock **is** affected by discontinuous jumps in system time, NTP, and user changes
pub const CLOCK_REALTIME: ::clockid_t = 0;
///A clock that measures monotonic time since an unspecified starting point
///
///Unless you manage to break your system, this unspecified point is usually when your computer powers on.
///
///This is not affected by user changes, but is by `adjtime` and NTP.
pub const CLOCK_MONOTONIC: ::clockid_t = 1;
///A high-resolution per-process timer from the processor.
pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 2;
///A (high-resolution?) thread-specific timer from the processor
pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 3;
///A hardware-based version of `CLOCK_MONOTONIC` that is not subject to changes
pub const CLOCK_MONOTONIC_RAW: ::clockid_t = 4;
///A faster but less precise version of `CLOCK_REALTIME`, measuring time in the "real world"
pub const CLOCK_REALTIME_COARSE: ::clockid_t = 5;
///A faster but less precise version of `CLOCK_MONOTONIC`, measuring time since an unspecified starting point
pub const CLOCK_MONOTONIC_COARSE: ::clockid_t = 6;
///Identical to `CLOCK_MONOTONIC`, but includes any time that the system is suspended.
pub const CLOCK_BOOTTIME: ::clockid_t = 7;
///Identical to `CLOCK_REALTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_REALTIME_ALARM: ::clockid_t = 8;
///Identical to `CLOCK_BOOTTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_BOOTTIME_ALARM: ::clockid_t = 9;
///A clock used for SGI systems. Need to investigate
pub const CLOCK_SGI_CYCLE: ::clockid_t = 10;
///A clock that shows International Atomic Time
pub const CLOCK_TAI: ::clockid_t = 11;
///The maximum clock ID that the system is allowed to have
pub const MAX_CLOCKS: ::clockid_t = 16; //Resolves to c_int. Please let me know if this should be c_int on its own
///A mask for supported clocks
///
///Needs to be investigated
pub const CLOCKS_MASK: ::clockid_t = CLOCK_REALTIME | CLOCK_MONOTONIC;
///A shorthand variant of CLOCK_MONOTONIC.
///
///This isn't used in the kernel. Is it left over from an old change that was reverted?
pub const CLOCKS_MONO: ::clockid_t = CLOCK_MONOTONIC;
///A flag indicating time is absolute
pub const TIMER_ABSTIME: ::c_int = 0x01;
///The type used for 64-bit time
pub type time64_t = i64;
///The number of milliseconds in a second
pub const MSEC_PER_SEC: ::c_long = 1000;
///The number of microseconds in a millisecond
pub const USEC_PER_MSEC: ::c_long = 1000;
///The number of nanoseconds in a microsecond
pub const NSEC_PER_USEC: ::c_long = 1000;
///The number of nanoseconds in a millisecond
pub const NSEC_PER_MSEC: ::c_long = 1000000;
///The number of microseconds in a second
pub const USEC_PER_SEC: ::c_long = 1000000;
///The number of nanoseconds in a second
pub const NSEC_PER_SEC: ::c_long = 1000000000;
///The number of femtoseconds in a second
pub const FSEC_PER_SEC: ::c_longlong = 1000000000000000;
#[cfg(any(target_arch = "x86",
target_arch = "le32",
target_arch = "powerpc",
target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
pub const TIME_T_MAX: ::time_t = 0b01111111111111111111111111111111;
#[cfg(any(target_arch = "x86_64",
target_arch = "aarch64"))]
pub const TIME_T_MAX: ::time_t = 0b0111111111111111111111111111111111111111111111111111111111111111;
///The maximum value of a time64_t
pub const TIME64_MAX: ::c_longlong = 0b0111111111111111111111111111111111111111111111111111111111111111;
///The maximum value of a ktime_t
pub const KTIME_MAX: ::c_longlong = 9_223_372_036_854_775_807;
///The maximum number of seconds in a ktime_t
|
///The period of time this timer should run for (Need to verify)
pub it_interval: timeval,
|
random_line_split
|
time.rs
|
{
///The number of seconds contained in this timespec
pub tv_sec: ::time_t,
///The number of nanoseconds contained in this timespec
pub tv_nsec: ::c_long
}
impl timespec {
///Creates a new timespec with both values defaulting to zero
pub fn new() -> timespec {
timespec { tv_sec: 0, tv_nsec: 0 }
}
///Creates a new timespec from the specified number of seconds
pub fn from_seconds(seconds: i64) -> timespec {
timespec { tv_sec: seconds, tv_nsec: 0 }
}
///Gets a representation of this timespec as a number of milliseconds
pub fn to_milliseconds(&self) -> i64 {
(self.tv_sec as i64 * MSEC_PER_SEC) + (self.tv_nsec as i64 / NSEC_PER_MSEC)
}
///Gets a representation of this timespec as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timespec, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_nsec = 0;
}
}
///A structure that contains the number of seconds and microseconds since an epoch.
///
///If in doubt, assume we're talking about the UNIX epoch.
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timeval {
///The number of seconds contained in this timeval
pub tv_sec: ::time_t,
///The number of microseconds contained in this timeval
pub tv_usec: ::suseconds_t
}
impl timeval {
///Creates a new timeval with both values defaulting to zero
pub fn new() -> timeval {
timeval { tv_sec: 0, tv_usec: 0 }
}
///Creates a new timeval from the specified number of seconds
pub fn from_seconds(seconds: ::time_t) -> timeval {
timeval { tv_sec: seconds, tv_usec: 0 }
}
///Gets a representation of this timeval as a number of milliseconds
pub fn to_milliseconds(&self) -> i64 {
(self.tv_sec as i64 * MSEC_PER_SEC) + (self.tv_usec as i64 / USEC_PER_MSEC)
}
///Gets a representation of this timeval as a number of seconds
pub fn to_seconds(&self) -> i64 {
self.to_milliseconds() / MSEC_PER_SEC
}
///Clears this timeval, setting each value to zero
pub fn clear(&mut self) {
self.tv_sec = 0;
self.tv_usec = 0;
}
}
///A structure containing information on the time-based location of a timezone
///
///Please note that this does not include the name or country code, only the minutes west of Greenwich and the type of DST correction
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct timezone {
///The number of minutes west of Greenwich
pub tz_minuteswest: ::c_int,
///The type of Daylight Saving Time correction
pub tz_dsttime: ::c_int
}
//Names of the interval timers
///An interval timer that decrements in real time
///
///On expiration, a SIGALRM is delivered
pub const ITIMER_REAL: ::c_int = 0;
///An interval timer that decrements only when the process is executing.
///
///On expiration, a SIGVTALRM is delivered
pub const ITIMER_VIRTUAL: ::c_int = 1;
///Decrements both while the process is executing and while the system is executing on behalf of the process
///
///This is usually used to profile kernel-space and user-space concurrently.
///
///If coupled with ITIMER_VIRTUAL, you can separate the two values - What is left when ITIMER_VIRTUAL's value is removed is kernel time
pub const ITIMER_PROF: ::c_int = 2;
///An interval timer based on a `timespec`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerspec {
///The period of time this timer should run for (Need to verify)
pub it_interval: timespec,
///The amount of time left until expiration (Need to verify)
pub it_value: timespec
}
///An interval timer based on a `timeval`
#[repr(C)]
#[derive(Debug, Clone, Copy)]
pub struct itimerval {
///The interval for periodic expirations; the timer is reloaded with this value after each expiration
pub it_interval: timeval,
///The time remaining until the next expiration
pub it_value: timeval
}
///A system-wide clock that measures time from the "real world"
///
///This clock **is** affected by discontinuous jumps in system time, NTP, and user changes
pub const CLOCK_REALTIME: ::clockid_t = 0;
///A clock that measures monotonic time since an unspecified starting point
///
///Unless you manage to break your system, this unspecified point is usually when your computer powers on.
///
///This is not affected by user changes, but it is affected by `adjtime` and NTP.
pub const CLOCK_MONOTONIC: ::clockid_t = 1;
///A high-resolution per-process timer from the processor.
pub const CLOCK_PROCESS_CPUTIME_ID: ::clockid_t = 2;
///A (high-resolution?) thread-specific timer from the processor
pub const CLOCK_THREAD_CPUTIME_ID: ::clockid_t = 3;
///A hardware-based version of `CLOCK_MONOTONIC` that is not subject to changes
pub const CLOCK_MONOTONIC_RAW: ::clockid_t = 4;
///A faster but less precise version of `CLOCK_REALTIME`, measuring time in the "real world"
pub const CLOCK_REALTIME_COARSE: ::clockid_t = 5;
///A faster but less precise version of `CLOCK_MONOTONIC`, measuring time since an unspecified starting point
pub const CLOCK_MONOTONIC_COARSE: ::clockid_t = 6;
///Identical to `CLOCK_MONOTONIC`, but includes any time that the system is suspended.
pub const CLOCK_BOOTTIME: ::clockid_t = 7;
///Identical to `CLOCK_REALTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_REALTIME_ALARM: ::clockid_t = 8;
///Identical to `CLOCK_BOOTTIME`, but timers exposed via this will wake the system if suspended
pub const CLOCK_BOOTTIME_ALARM: ::clockid_t = 9;
///A clock used on SGI systems. Needs investigation
pub const CLOCK_SGI_CYCLE: ::clockid_t = 10;
///A clock that shows International Atomic Time
pub const CLOCK_TAI: ::clockid_t = 11;
///The maximum clock ID that the system is allowed to have
pub const MAX_CLOCKS: ::clockid_t = 16; //Resolves to c_int. Please let me know if this should be c_int on its own
///A mask for supported clocks
///
///Needs to be investigated
pub const CLOCKS_MASK: ::clockid_t = CLOCK_REALTIME | CLOCK_MONOTONIC;
///A shorthand variant of CLOCK_MONOTONIC.
///
///This isn't used in the kernel. Is it left over from an old change that was reverted?
pub const CLOCKS_MONO: ::clockid_t = CLOCK_MONOTONIC;
///A flag indicating time is absolute
pub const TIMER_ABSTIME: ::c_int = 0x01;
///The type used for 64-bit time
pub type time64_t = i64;
///The number of milliseconds in a second
pub const MSEC_PER_SEC: ::c_long = 1000;
///The number of microseconds in a millisecond
pub const USEC_PER_MSEC: ::c_long = 1000;
///The number of nanoseconds in a microsecond
pub const NSEC_PER_USEC: ::c_long = 1000;
///The number of nanoseconds in a millisecond
pub const NSEC_PER_MSEC: ::c_long = 1000000;
///The number of microseconds in a second
pub const USEC_PER_SEC: ::c_long = 1000000;
///The number of nanoseconds in a second
pub const NSEC_PER_SEC: ::c_long = 1000000000;
///The number of femtoseconds in a second
pub const FSEC_PER_SEC: ::c_longlong = 1000000000000000;
#[cfg(any(target_arch = "x86",
target_arch = "le32",
target_arch = "powerpc",
target_arch = "arm",
target_arch = "mips",
target_arch = "mipsel"))]
pub const TIME_T_MAX: ::time_t = 0b01111111111111111111111111111111;
#[cfg(any(target_arch = "x86_64",
target_arch = "aarch64"))]
pub const TIME_T_MAX: ::time_t = 0b011111111111111111111111111111111
|
timespec
|
identifier_name
|
|
format_wav_scp.py
|
, fs: int) -> np.array:
# Conduct trim with VAD information
assert check_argument_types()
assert uttid in vad_reader, uttid
vad_info = vad_reader[uttid]
total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)
new_wav = np.zeros((total_length,), dtype=wav.dtype)
start_frame = 0
for time in vad_info:
# Note: we regard vad as [xxx, yyy)
duration = int((time[1] - time[0]) * fs)
orig_start_frame = int(time[0] * fs)
orig_end_frame = orig_start_frame + duration
end_frame = start_frame + duration
new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]
start_frame = end_frame
return new_wav
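A self-contained toy run of the [start, end) trimming above; the sampling rate and VAD intervals are made-up values:

import numpy as np

fs = 10                                # made-up rate: 10 samples per second
wav = np.arange(30, dtype=np.float32)  # 3 s of fake audio: samples 0..29
vad_info = [(0.5, 1.0), (2.0, 2.5)]    # voiced [start, end) pairs in seconds

total = sum(int((e - s) * fs) for s, e in vad_info)
out = np.zeros((total,), dtype=wav.dtype)
pos = 0
for s, e in vad_info:
    n = int((e - s) * fs)
    start = int(s * fs)
    out[pos:pos + n] = wav[start:start + n]
    pos += n
assert out.tolist() == [5.0, 6.0, 7.0, 8.0, 9.0, 20.0, 21.0, 22.0, 23.0, 24.0]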
class SegmentsExtractor:
"""Emulating kaldi extract-segments.cc
Args:
segments (str): The file format is
"<segment-id> <recording-id> <start-time> <end-time>\n"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n"
"""
def __init__(self, fname: str, segments: str = None, multi_columns: bool = False):
assert check_argument_types()
self.wav_scp = fname
self.multi_columns = multi_columns
self.wav_dict = {}
with open(self.wav_scp, "r") as f:
for line in f:
recodeid, wavpath = line.strip().split(None, 1)
if recodeid in self.wav_dict:
raise RuntimeError(f"{recodeid} is duplicated")
self.wav_dict[recodeid] = wavpath
self.segments = segments
self.segments_dict = {}
with open(self.segments, "r") as f:
for line in f:
sps = line.rstrip().split(None)
if len(sps) != 4:
raise RuntimeError("Format is invalid: {}".format(line))
uttid, recodeid, st, et = sps
self.segments_dict[uttid] = (recodeid, float(st), float(et))
if recodeid not in self.wav_dict:
raise RuntimeError(
'Not found "{}" in {}'.format(recodeid, self.wav_scp)
)
def generator(self):
recodeid_counter = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1
cached = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
wavpath = self.wav_dict[recodeid]
if recodeid not in cached:
if wavpath.endswith("|"):
if self.multi_columns:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
array, rate = soundfile.read(g)
else:
if self.multi_columns:
array, rate = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
)
else:
array, rate = soundfile.read(wavpath)
cached[recodeid] = array, rate
array, rate = cached[recodeid]
# Keep array until the last query
recodeid_counter[recodeid] -= 1
if recodeid_counter[recodeid] == 0:
cached.pop(recodeid)
# Convert starting time of the segment to corresponding sample number.
# If end time is -1 then use the whole file starting from start time.
if et != -1:
array = array[int(st * rate) : int(et * rate)]
else:
array = array[int(st * rate) :]
yield utt, (array, rate), None, None
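For intuition, here is how a segment's times map to sample indices in the slicing above; the segment line is the docstring's example and the sampling rate is an assumed value:

# "call-861225-A-0050-0065 call-861225-A 5.0 6.5" at an assumed 16 kHz
rate = 16000
st, et = 5.0, 6.5
start, end = int(st * rate), int(et * rate)  # 80000, 104000
# array[start:end] is the 1.5 s segment; et == -1 means "to the end of file"
assert end - start == int(1.5 * rate)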
def main():
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info(get_commandline_args())
parser = argparse.ArgumentParser(
description='Create a wave list from "wav.scp"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("scp")
parser.add_argument("outdir")
parser.add_argument(
"--name",
default="wav",
help='Specify the prefix word of output file name such as "wav.scp"',
)
parser.add_argument("--segments", default=None)
parser.add_argument(
"--fs",
type=humanfriendly_or_none,
default=None,
help="If the sampling rate specified, Change the sampling rate.",
)
parser.add_argument("--audio-format", default="wav")
parser.add_argument("--vad_based_trim", type=str, default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--ref-channels", default=None, type=str2int_tuple)
group.add_argument("--utt2ref-channels", default=None, type=str)
group.add_argument(
"--audio-subtype",
default=None,
type=str,
help=(
"Give a interpretable subtype by soundfile e.g. PCM_16. "
"You can check all available types by soundfile.available_subtypes()"
),
)
parser.add_argument(
"--multi-columns-input",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for input wav.scp. "
"e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data"
),
)
parser.add_argument(
"--multi-columns-output",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for output wav.scp. "
"e.g. If input audio data has 2ch, "
"each line in wav.scp has the the format like "
"'ID ID-CH0.wav ID-CH1.wav'"
),
)
args = parser.parse_args()
out_num_samples = Path(args.outdir) / "utt2num_samples"
if args.ref_channels is not None:
def utt2ref_channels(x) -> Tuple[int, ...]:
return args.ref_channels
elif args.utt2ref_channels is not None:
utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)
def utt2ref_channels(x, d=utt2ref_channels_dict) -> Tuple[int, ...]:
chs_str = d[x]
return tuple(map(int, chs_str.split()))
else:
utt2ref_channels = None
if args.audio_format.endswith("ark") and args.multi_columns_output:
raise RuntimeError("Multi columns wav.scp is not supported for ark type")
Path(args.outdir).mkdir(parents=True, exist_ok=True)
out_wavscp = Path(args.outdir) / f"{args.name}.scp"
|
fark = open(Path(args.outdir) / f"data_{args.name}.ark", "wb")
fscp_out = out_wavscp.open("w")
writer = None
else:
writer = SoundScpWriter(
args.outdir,
out_wavscp,
format=args.audio_format,
multi_columns=args.multi_columns_output,
subtype=args.audio_subtype,
)
fscp_out = None
if args.vad_based_trim is not None:
vad_reader = VADScpReader(args.vad_based_trim)
if args.segments is not None:
extractor = SegmentsExtractor(
args.scp, segments=args.segments, multi_columns=args.multi_columns_input
)
generator = extractor.generator
else:
def generator():
with Path(args.scp).open("r") as fscp:
for line in tqdm(fscp):
uttid, wavpath = line.strip().split(None, 1)
# B.a. Without segments and using pipe inputs
if wavpath.endswith("|"):
if args.multi_columns_input:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by"
" pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
wave, rate = soundfile.read(g)
subtypes = None
# B.b Without segments and not using pipe
else:
if args.multi_columns_input:
wave, rate, subtypes = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
return_subtype=True,
)
else:
with soundfile.SoundFile(wavpath) as sf:
rate = sf.samplerate
subtypes = [sf.subtype]
wave = sf.read()
yield uttid, (wave, rate), wavpath, subtypes
with out_num_samples.open("w")
|
if args.audio_format.endswith("ark"):
|
random_line_split
|
format_wav_scp.py
|
, fs: int) -> np.array:
# Conduct trim with VAD information
assert check_argument_types()
assert uttid in vad_reader, uttid
vad_info = vad_reader[uttid]
total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)
new_wav = np.zeros((total_length,), dtype=wav.dtype)
start_frame = 0
for time in vad_info:
# Note: we regard vad as [xxx, yyy)
duration = int((time[1] - time[0]) * fs)
orig_start_frame = int(time[0] * fs)
orig_end_frame = orig_start_frame + duration
end_frame = start_frame + duration
new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]
start_frame = end_frame
return new_wav
class SegmentsExtractor:
"""Emulating kaldi extract-segments.cc
Args:
segments (str): The file format is
"<segment-id> <recording-id> <start-time> <end-time>\n"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n"
"""
def __init__(self, fname: str, segments: str = None, multi_columns: bool = False):
assert check_argument_types()
self.wav_scp = fname
self.multi_columns = multi_columns
self.wav_dict = {}
with open(self.wav_scp, "r") as f:
for line in f:
recodeid, wavpath = line.strip().split(None, 1)
if recodeid in self.wav_dict:
raise RuntimeError(f"{recodeid} is duplicated")
self.wav_dict[recodeid] = wavpath
self.segments = segments
self.segments_dict = {}
with open(self.segments, "r") as f:
for line in f:
sps = line.rstrip().split(None)
if len(sps) != 4:
raise RuntimeError("Format is invalid: {}".format(line))
uttid, recodeid, st, et = sps
self.segments_dict[uttid] = (recodeid, float(st), float(et))
if recodeid not in self.wav_dict:
raise RuntimeError(
'Not found "{}" in {}'.format(recodeid, self.wav_scp)
)
def generator(self):
recodeid_counter = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1
cached = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
wavpath = self.wav_dict[recodeid]
if recodeid not in cached:
if wavpath.endswith("|"):
if self.multi_columns:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
array, rate = soundfile.read(g)
else:
if self.multi_columns:
array, rate = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
)
else:
array, rate = soundfile.read(wavpath)
cached[recodeid] = array, rate
array, rate = cached[recodeid]
# Keep array until the last query
recodeid_counter[recodeid] -= 1
if recodeid_counter[recodeid] == 0:
cached.pop(recodeid)
# Convert starting time of the segment to corresponding sample number.
# If end time is -1 then use the whole file starting from start time.
if et != -1:
|
else:
array = array[int(st * rate) :]
yield utt, (array, rate), None, None
def main():
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info(get_commandline_args())
parser = argparse.ArgumentParser(
description='Create a wave list from "wav.scp"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("scp")
parser.add_argument("outdir")
parser.add_argument(
"--name",
default="wav",
help='Specify the prefix word of output file name such as "wav.scp"',
)
parser.add_argument("--segments", default=None)
parser.add_argument(
"--fs",
type=humanfriendly_or_none,
default=None,
help="If the sampling rate specified, Change the sampling rate.",
)
parser.add_argument("--audio-format", default="wav")
parser.add_argument("--vad_based_trim", type=str, default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--ref-channels", default=None, type=str2int_tuple)
group.add_argument("--utt2ref-channels", default=None, type=str)
group.add_argument(
"--audio-subtype",
default=None,
type=str,
help=(
"Give a interpretable subtype by soundfile e.g. PCM_16. "
"You can check all available types by soundfile.available_subtypes()"
),
)
parser.add_argument(
"--multi-columns-input",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for input wav.scp. "
"e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data"
),
)
parser.add_argument(
"--multi-columns-output",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for output wav.scp. "
"e.g. If input audio data has 2ch, "
"each line in wav.scp has the the format like "
"'ID ID-CH0.wav ID-CH1.wav'"
),
)
args = parser.parse_args()
out_num_samples = Path(args.outdir) / "utt2num_samples"
if args.ref_channels is not None:
def utt2ref_channels(x) -> Tuple[int, ...]:
return args.ref_channels
elif args.utt2ref_channels is not None:
utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)
def utt2ref_channels(x, d=utt2ref_channels_dict) -> Tuple[int, ...]:
chs_str = d[x]
return tuple(map(int, chs_str.split()))
else:
utt2ref_channels = None
if args.audio_format.endswith("ark") and args.multi_columns_output:
raise RuntimeError("Multi columns wav.scp is not supported for ark type")
Path(args.outdir).mkdir(parents=True, exist_ok=True)
out_wavscp = Path(args.outdir) / f"{args.name}.scp"
if args.audio_format.endswith("ark"):
fark = open(Path(args.outdir) / f"data_{args.name}.ark", "wb")
fscp_out = out_wavscp.open("w")
writer = None
else:
writer = SoundScpWriter(
args.outdir,
out_wavscp,
format=args.audio_format,
multi_columns=args.multi_columns_output,
subtype=args.audio_subtype,
)
fscp_out = None
if args.vad_based_trim is not None:
vad_reader = VADScpReader(args.vad_based_trim)
if args.segments is not None:
extractor = SegmentsExtractor(
args.scp, segments=args.segments, multi_columns=args.multi_columns_input
)
generator = extractor.generator
else:
def generator():
with Path(args.scp).open("r") as fscp:
for line in tqdm(fscp):
uttid, wavpath = line.strip().split(None, 1)
# B.a. Without segments and using pipe inputs
if wavpath.endswith("|"):
if args.multi_columns_input:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by"
" pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
wave, rate = soundfile.read(g)
subtypes = None
# B.b Without segments and not using pipe
else:
if args.multi_columns_input:
wave, rate, subtypes = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
return_subtype=True,
)
else:
with soundfile.SoundFile(wavpath) as sf:
rate = sf.samplerate
subtypes = [sf.subtype]
wave = sf.read()
yield uttid, (wave, rate), wavpath, subtypes
with out_num_samples.open("w")
|
array = array[int(st * rate) : int(et * rate)]
|
conditional_block
|
format_wav_scp.py
|
def str2int_tuple(integers: str) -> Optional[Tuple[int, ...]]:
"""
>>> str2int_tuple('3,4,5')
(3, 4, 5)
"""
assert check_argument_types()
if integers.strip() in ("none", "None", "NONE", "null", "Null", "NULL"):
return None
return tuple(map(int, integers.strip().split(",")))
def vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.array, fs: int) -> np.array:
# Conduct trim with VAD information
assert check_argument_types()
assert uttid in vad_reader, uttid
vad_info = vad_reader[uttid]
total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)
new_wav = np.zeros((total_length,), dtype=wav.dtype)
start_frame = 0
for time in vad_info:
# Note: we regard vad as [xxx, yyy)
duration = int((time[1] - time[0]) * fs)
orig_start_frame = int(time[0] * fs)
orig_end_frame = orig_start_frame + duration
end_frame = start_frame + duration
new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]
start_frame = end_frame
return new_wav
class SegmentsExtractor:
"""Emulating kaldi extract-segments.cc
Args:
segments (str): The file format is
"<segment-id> <recording-id> <start-time> <end-time>\n"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n"
"""
def __init__(self, fname: str, segments: str = None, multi_columns: bool = False):
assert check_argument_types()
self.wav_scp = fname
self.multi_columns = multi_columns
self.wav_dict = {}
with open(self.wav_scp, "r") as f:
for line in f:
recodeid, wavpath = line.strip().split(None, 1)
if recodeid in self.wav_dict:
raise RuntimeError(f"{recodeid} is duplicated")
self.wav_dict[recodeid] = wavpath
self.segments = segments
self.segments_dict = {}
with open(self.segments, "r") as f:
for line in f:
sps = line.rstrip().split(None)
if len(sps) != 4:
raise RuntimeError("Format is invalid: {}".format(line))
uttid, recodeid, st, et = sps
self.segments_dict[uttid] = (recodeid, float(st), float(et))
if recodeid not in self.wav_dict:
raise RuntimeError(
'Not found "{}" in {}'.format(recodeid, self.wav_scp)
)
def generator(self):
recodeid_counter = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1
cached = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
wavpath = self.wav_dict[recodeid]
if recodeid not in cached:
if wavpath.endswith("|"):
if self.multi_columns:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
array, rate = soundfile.read(g)
else:
if self.multi_columns:
array, rate = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
)
else:
array, rate = soundfile.read(wavpath)
cached[recodeid] = array, rate
array, rate = cached[recodeid]
# Keep array until the last query
recodeid_counter[recodeid] -= 1
if recodeid_counter[recodeid] == 0:
cached.pop(recodeid)
# Convert starting time of the segment to corresponding sample number.
# If end time is -1 then use the whole file starting from start time.
if et != -1:
array = array[int(st * rate) : int(et * rate)]
else:
array = array[int(st * rate) :]
yield utt, (array, rate), None, None
def main():
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info(get_commandline_args())
parser = argparse.ArgumentParser(
description='Create a wave list from "wav.scp"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("scp")
parser.add_argument("outdir")
parser.add_argument(
"--name",
default="wav",
help='Specify the prefix word of output file name such as "wav.scp"',
)
parser.add_argument("--segments", default=None)
parser.add_argument(
"--fs",
type=humanfriendly_or_none,
default=None,
help="If the sampling rate specified, Change the sampling rate.",
)
parser.add_argument("--audio-format", default="wav")
parser.add_argument("--vad_based_trim", type=str, default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--ref-channels", default=None, type=str2int_tuple)
group.add_argument("--utt2ref-channels", default=None, type=str)
group.add_argument(
"--audio-subtype",
default=None,
type=str,
help=(
"Give a interpretable subtype by soundfile e.g. PCM_16. "
"You can check all available types by soundfile.available_subtypes()"
),
)
parser.add_argument(
"--multi-columns-input",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for input wav.scp. "
"e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data"
),
)
parser.add_argument(
"--multi-columns-output",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for output wav.scp. "
"e.g. If input audio data has 2ch, "
"each line in wav.scp has the the format like "
"'ID ID-CH0.wav ID-CH1.wav'"
),
)
args = parser.parse_args()
out_num_samples = Path(args.outdir) / "utt2num_samples"
if args.ref_channels is not None:
def utt2ref_channels(x) -> Tuple[int, ...]:
return args.ref_channels
elif args.utt2ref_channels is not None:
utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)
def utt2ref_channels(x, d=utt2ref_channels_dict) -> Tuple[int, ...]:
chs_str = d[x]
return tuple(map(int, chs_str.split()))
else:
utt2ref_channels = None
if args.audio_format.endswith("ark") and args.multi_columns_output:
raise RuntimeError("Multi columns wav.scp is not supported for ark type")
Path(args.outdir).mkdir(parents=True, exist_ok=True)
out_wavscp = Path(args.outdir) / f"{args.name}.scp"
if args.audio_format.endswith("ark"):
fark = open(Path(args.outdir) / f"data_{args.name}.ark", "wb")
fscp_out = out_wavscp.open("w")
writer = None
else:
writer = SoundScpWriter(
args.outdir,
out_wavscp,
format=args.audio_format,
multi_columns=args.multi_columns_output,
subtype=args.audio_subtype,
)
fscp_out = None
if args.vad_based_trim is not None:
vad_reader = VADScpReader(args.vad_based_trim)
if args.segments is not None:
extractor = SegmentsExtractor(
args.scp, segments=args.segments, multi_columns=args.multi_columns_input
)
generator = extractor.generator
else:
def generator():
with Path(args.scp).open("r") as fscp:
for line in tqdm(fscp):
uttid, wavpath = line.strip().split(None, 1)
# B.a. Without segments and using pipe inputs
if wavpath.endswith("|"):
if args.multi_columns_input:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by"
" pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
wave, rate = soundfile.read
|
if value in ("none", "None", "NONE"):
return None
return humanfriendly.parse_size(value)
|
identifier_body
|
|
format_wav_scp.py
|
, fs: int) -> np.array:
# Conduct trim with VAD information
assert check_argument_types()
assert uttid in vad_reader, uttid
vad_info = vad_reader[uttid]
total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)
new_wav = np.zeros((total_length,), dtype=wav.dtype)
start_frame = 0
for time in vad_info:
# Note: we regard vad as [xxx, yyy)
duration = int((time[1] - time[0]) * fs)
orig_start_frame = int(time[0] * fs)
orig_end_frame = orig_start_frame + duration
end_frame = start_frame + duration
new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]
start_frame = end_frame
return new_wav
class SegmentsExtractor:
"""Emulating kaldi extract-segments.cc
Args:
segments (str): The file format is
"<segment-id> <recording-id> <start-time> <end-time>\n"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n"
"""
def __init__(self, fname: str, segments: str = None, multi_columns: bool = False):
assert check_argument_types()
self.wav_scp = fname
self.multi_columns = multi_columns
self.wav_dict = {}
with open(self.wav_scp, "r") as f:
for line in f:
recodeid, wavpath = line.strip().split(None, 1)
if recodeid in self.wav_dict:
raise RuntimeError(f"{recodeid} is duplicated")
self.wav_dict[recodeid] = wavpath
self.segments = segments
self.segments_dict = {}
with open(self.segments, "r") as f:
for line in f:
sps = line.rstrip().split(None)
if len(sps) != 4:
raise RuntimeError("Format is invalid: {}".format(line))
uttid, recodeid, st, et = sps
self.segments_dict[uttid] = (recodeid, float(st), float(et))
if recodeid not in self.wav_dict:
raise RuntimeError(
'Not found "{}" in {}'.format(recodeid, self.wav_scp)
)
def generator(self):
recodeid_counter = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1
cached = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
wavpath = self.wav_dict[recodeid]
if recodeid not in cached:
if wavpath.endswith("|"):
if self.multi_columns:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
array, rate = soundfile.read(g)
else:
if self.multi_columns:
array, rate = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
)
else:
array, rate = soundfile.read(wavpath)
cached[recodeid] = array, rate
array, rate = cached[recodeid]
# Keep array until the last query
recodeid_counter[recodeid] -= 1
if recodeid_counter[recodeid] == 0:
cached.pop(recodeid)
# Convert starting time of the segment to corresponding sample number.
# If end time is -1 then use the whole file starting from start time.
if et != -1:
array = array[int(st * rate) : int(et * rate)]
else:
array = array[int(st * rate) :]
yield utt, (array, rate), None, None
def main():
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info(get_commandline_args())
parser = argparse.ArgumentParser(
description='Create a wave list from "wav.scp"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("scp")
parser.add_argument("outdir")
parser.add_argument(
"--name",
default="wav",
help='Specify the prefix word of output file name such as "wav.scp"',
)
parser.add_argument("--segments", default=None)
parser.add_argument(
"--fs",
type=humanfriendly_or_none,
default=None,
help="If the sampling rate specified, Change the sampling rate.",
)
parser.add_argument("--audio-format", default="wav")
parser.add_argument("--vad_based_trim", type=str, default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--ref-channels", default=None, type=str2int_tuple)
group.add_argument("--utt2ref-channels", default=None, type=str)
group.add_argument(
"--audio-subtype",
default=None,
type=str,
help=(
"Give a interpretable subtype by soundfile e.g. PCM_16. "
"You can check all available types by soundfile.available_subtypes()"
),
)
parser.add_argument(
"--multi-columns-input",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for input wav.scp. "
"e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data"
),
)
parser.add_argument(
"--multi-columns-output",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for output wav.scp. "
"e.g. If input audio data has 2ch, "
"each line in wav.scp has the the format like "
"'ID ID-CH0.wav ID-CH1.wav'"
),
)
args = parser.parse_args()
out_num_samples = Path(args.outdir) / "utt2num_samples"
if args.ref_channels is not None:
def utt2ref_channels(x) -> Tuple[int, ...]:
return args.ref_channels
elif args.utt2ref_channels is not None:
utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)
def
|
(x, d=utt2ref_channels_dict) -> Tuple[int, ...]:
chs_str = d[x]
return tuple(map(int, chs_str.split()))
else:
utt2ref_channels = None
if args.audio_format.endswith("ark") and args.multi_columns_output:
raise RuntimeError("Multi columns wav.scp is not supported for ark type")
Path(args.outdir).mkdir(parents=True, exist_ok=True)
out_wavscp = Path(args.outdir) / f"{args.name}.scp"
if args.audio_format.endswith("ark"):
fark = open(Path(args.outdir) / f"data_{args.name}.ark", "wb")
fscp_out = out_wavscp.open("w")
writer = None
else:
writer = SoundScpWriter(
args.outdir,
out_wavscp,
format=args.audio_format,
multi_columns=args.multi_columns_output,
subtype=args.audio_subtype,
)
fscp_out = None
if args.vad_based_trim is not None:
vad_reader = VADScpReader(args.vad_based_trim)
if args.segments is not None:
extractor = SegmentsExtractor(
args.scp, segments=args.segments, multi_columns=args.multi_columns_input
)
generator = extractor.generator
else:
def generator():
with Path(args.scp).open("r") as fscp:
for line in tqdm(fscp):
uttid, wavpath = line.strip().split(None, 1)
# B.a. Without segments and using pipe inputs
if wavpath.endswith("|"):
if args.multi_columns_input:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by"
" pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
wave, rate = soundfile.read(g)
subtypes = None
# B.b Without segments and not using pipe
else:
if args.multi_columns_input:
wave, rate, subtypes = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
return_subtype=True,
)
else:
with soundfile.SoundFile(wavpath) as sf:
rate = sf.samplerate
subtypes = [sf.subtype]
wave = sf.read()
yield uttid, (wave, rate), wavpath, subtypes
with out_num_samples.open("w") as
|
utt2ref_channels
|
identifier_name
|
cv4ag.py
|
('--lonshift',metavar='N.N',
type=float,default=0,
help='Longitudinal shift of training data.')
cmdParser.add_argument('--latshift',metavar='N.N',
type=float,default=0,
help='Lateral shift of training data.')
cmdParser.add_argument('--shiftformat',metavar='N',
type=int,default=0,
help='Format of longitudinal/lateral shift.\
0: As fraction of image. 1: Georeferenced units.')
cmdParser.add_argument('--top',metavar='N',
type=int,default=15,
help='Get N most frequent classes.')
cmdParser.add_argument('--key',
type=str,default='Descriptio',
help='Set parameter key for category in GIS file to classify data.')
cmdParser.add_argument('--epsg',metavar='N',
type=int,default=None,
help='EPSG format for GIS data. Is read from data if not set.')
cmdParser.add_argument('--layer',metavar='N',
type=int,default=None,
help='Number of layers to be trained on.')
cmdParser.add_argument('--mode',
type=str,default='gpu',
help='GPU (default) or CPU mode')
cmdParser.add_argument('--sat',
type=str,default=None,
help='Folder with training satellite images, if not in standard location.')
cmdParser.add_argument('--batchsize',metavar='N',
type=int,default=None,
help='Size of training batch (1-4)')
cmdParser.add_argument('--stepsize',metavar='N.N',
type=float,default=None,
help='Size of training step')
cmdParser.add_argument('--maxiter',metavar='N',
type=int,default=None,
help='Maximum iterations at training stage')
cmdParser.add_argument('--datatype',
type=str,default='PNG',
help='Datatype of training input data (PNG, LMDB, LMDB2 or HDF5)')
cmdParser.add_argument('--arg1',
type=str,default=None,
help='Argument 1 for script.')
cmdParser.add_argument('--arg2',
type=str,default=None,
help='Argument 2 for script.')
cmdParser.add_argument('--arg3',
type=str,default=None,
help='Argument 3 for script.')
cmdParser.add_argument('--arg4',
type=str,default=None,
help='Argument 4 for script.')
testParser = cmdParser.add_mutually_exclusive_group(required=False)
testParser.add_argument('--test', dest='test', action='store_true',help='Create test set.')
testParser.add_argument('--no-test', dest='test', action='store_false',help='Do not create test set (default)')
cmdParser.set_defaults(test=False)
backgroundParser = cmdParser.add_mutually_exclusive_group(required=False)
backgroundParser.add_argument('--background', dest='b', action='store_false',help='Classify background for training (default)')
backgroundParser.add_argument('--no-background', dest='b', action='store_true',help='Ignore background for training.')
cmdParser.set_defaults(b=False)
randomParser = cmdParser.add_mutually_exclusive_group(required=False)
randomParser.add_argument('--random', dest='randomImages', action='store_true',help='Use random images within GIS boundary box.')
randomParser.add_argument('--no-random', dest='randomImages', action='store_false',help='Only use images with features (default).')
cmdParser.set_defaults(randomImages=False)
weightParser = cmdParser.add_mutually_exclusive_group(required=False)
weightParser.add_argument('--weights', dest='initweights', action='store_true',help='Initialize weights according to frequency statistics (default).')
weightParser.add_argument('--no-weights', dest='initweights', action='store_false',help='Do not initialize weights.')
cmdParser.set_defaults(initweights=True)
compareParser = cmdParser.add_mutually_exclusive_group(required=False)
compareParser.add_argument('--compares', dest='compare', action='store_true',help='Compare classified results with labels')
compareParser.add_argument('--no-compares', dest='compare', action='store_false',help='Do not compare classified results with labels')
cmdParser.set_defaults(compare=True)
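With the flags above in place, a hypothetical end-to-end invocation could look like this (token, file names, and option values are illustrative, not taken from the source):

# Hypothetical usage sketch:
#   python cv4ag.py all MY_MAPBOX_TOKEN -i fields.shp -o data/ -z 17 \
#       --top 10 --mode cpu --batchsize 2
# which runs parse -> satellite -> overlay -> train -> ml in sequence.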
cmdArgs = vars(cmdParser.parse_args())
selectedModule = cmdArgs.get('module')
mapboxtoken = cmdArgs.get('mapbox_token')
inputFile = cmdArgs.get('i')
outputFolder = cmdArgs.get('o')
zoomLevel= cmdArgs.get('z')
datatype = cmdArgs.get('d')
satelliteCount = cmdArgs.get('c')
xpixel = cmdArgs.get('x')
ypixel = cmdArgs.get('y')
scriptFile = cmdArgs.get('s')
net = cmdArgs.get('n')
scriptArg1 = cmdArgs.get('arg1')
scriptArg2 = cmdArgs.get('arg2')
scriptArg3 = cmdArgs.get('arg3')
scriptArg4 = cmdArgs.get('arg4')
lonshift= cmdArgs.get('lonshift')
latshift= cmdArgs.get('latshift')
layernumber = cmdArgs.get('layer')
shiftformat = cmdArgs.get('shiftformat')
key = cmdArgs.get('key')
mode = cmdArgs.get('mode')
top = cmdArgs.get('top')
epsg = cmdArgs.get('epsg')
test = cmdArgs.get('test')
batchsize = cmdArgs.get('batchsize')
maxiter = cmdArgs.get('maxiter')
stepsize = cmdArgs.get('stepsize')
datatype = cmdArgs.get('datatype')
sat = cmdArgs.get('sat')
b = cmdArgs.get('b')
randomImages = cmdArgs.get('randomImages')
initweights = cmdArgs.get('initweights')
compare = cmdArgs.get('compare')
# Execute according to options
print "Option:",selectedModule
#only import caffe if needed
if selectedModule == 'all' or selectedModule=='train' or selectedModule=='ml':
import train,applyml
if selectedModule == 'all':
inputFile,stats,freq,elements=\
parse.parse(inputFile=inputFile,outputFolder=outputFolder,
scriptFile=scriptFile,datatype=datatype,top=top,layernumber=layernumber,
key=key,
scriptArg1=scriptArg1,scriptArg2=scriptArg2,
scriptArg3=scriptArg3,scriptArg4=scriptArg4)
get_satellite.get_satellite(inputFile=inputFile,
mapboxtoken=mapboxtoken,
count=satelliteCount,
zoomLevel=zoomLevel,
outputFolder=outputFolder,
epsg=epsg,
xpixel=xpixel,
ypixel=ypixel,
randomImages=randomImages,
elements=elements)
overlay.overlay(outputFolder,inputFile,
xpixel=xpixel,
ypixel=ypixel,
zoomLevel=zoomLevel,
lonshift=lonshift,latshift=latshift,
shiftformat=shiftformat,
top=top,
stats=stats,
count=satelliteCount,
epsg=epsg,
key=key,
sat=sat,
randomImages=randomImages,
elements=elements\
)
train.train(outputFolder=outputFolder,
inputFile=inputFile,
net=net,
top=top,
key=key,
mode=mode,
xpixel=xpixel,
ypixel=ypixel,
stats=stats,
freq=freq,
elements=elements,
ignorebackground=b,
initweights=initweights,
batchsize=batchsize,
maxiter=maxiter,
stepsize=stepsize,
datatype=datatype,
createTest=test\
)
applyml.apply(outputFolder,
inputFile,
mode=mode,
ignorebackground=b,
#stats=stats,
epsg=epsg,
top=top,
compare=compare,
key=key)
elif selectedModule == 'parse':
parse.parse(inputFile=inputFile,outputFolder=outputFolder,
scriptFile=scriptFile,datatype=datatype,top=top,layernumber=layernumber,
key=key,
scriptArg1=scriptArg1,scriptArg2=scriptArg2,
scriptArg3=scriptArg3,scriptArg4=scriptArg4)
elif selectedModule == 'satellite':
get_satellite.get_satellite(inputFile=inputFile,
mapboxtoken=mapboxtoken,
count=satelliteCount,
zoomLevel=zoomLevel,
epsg=epsg,
outputFolder=outputFolder,
randomImages=randomImages,
xpixel=xpixel,
ypixel=ypixel)
elif selectedModule == 'overlay':
overlay.overlay(outputFolder,inputFile,
xpixel=xpixel,
ypixel=ypixel,
zoomLevel=zoomLevel,
lonshift=lonshift,latshift=latshift,
shiftformat=shiftformat,
top=top,
epsg=epsg,
sat=sat,
count=satelliteCount,
randomImages=randomImages,
key=key
)
elif selectedModule == 'train':
|
train.train(outputFolder=outputFolder,
inputFile=inputFile,
net=net,
top=top,
key=key,
mode=mode,
xpixel=xpixel,
ypixel=ypixel,
ignorebackground=b,
batchsize=batchsize,
maxiter=maxiter,
datatype=datatype,
stepsize=stepsize,
initweights=initweights,
createTest=test\
)
|
conditional_block
|
|
cv4ag.py
|
(argparse.ArgumentParser): # override error message to show usage
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
cmdParser = myParse(\
description='Machine Learning Framework for Agricultural Data.',
add_help=True)
cmdParser.add_argument('module',
metavar='OPTION',
type=str,default=False,
help='The modules to be loaded. OPTION: \n\
all - all modules (except clear).\n\
parse - input file parser.\n\
satellite - get satellite data.\n\
overlay - overlay classification with satellite data. \n\
train - train.\n\
ml - apply machine learning algorithm.\n\
clear - clear generated data from previous run on input file')
cmdParser.add_argument('mapbox_token',
metavar='MAPBOX_TOKEN',
type=str,default=False,nargs='?',
help='Mapbox token to download satellite images.')
cmdParser.add_argument('-i',
type=str,default=None,metavar='FILE',
help='Input file. Do not give if data obtained by script.')
cmdParser.add_argument('-s',metavar='FILE',
type=str,default=None,
help='Script file to obtain data')
cmdParser.add_argument('-o',metavar='PATH',
type=str,default="data/",
help='Output folder. Satellite data are put in and read from\
PATH/sat/.')
cmdParser.add_argument('-c',metavar='N',
type=int,default=1000,
help='Number of satellite images to download.')
cmdParser.add_argument('-z',metavar='N',
type=int,default=17,
help='Zoom level. Min=15, Max=19. See libs/satellite_resolutions.csv for resolutions.')
cmdParser.add_argument('-x',metavar='N',
type=int,default=480,
help='Images have width N pixel.')
cmdParser.add_argument('-y',metavar='N',
type=int,default=360,
help='Images have height N pixel.')
cmdParser.add_argument('-d',metavar='FILETYPE_CODE',
type=str,default=None,
help='Specify file type. Will try to detect the filetype automatically. \
Will not prompt for vector conversion if not given.\
See www.gdal.org/formats_list.html or\
www.gdal.org/ogr_formats.html \
(or libs/*_formats.csv) for FILETYPE_CODEs.')
cmdParser.add_argument('-n',metavar='N',
type=int,default=1,
help='Accuracy of neural net. 0: lowest. 3: highest.')
cmdParser.add_argument('--lonshift',metavar='N.N',
type=float,default=0,
help='Longitudinal shift of training data.')
cmdParser.add_argument('--latshift',metavar='N.N',
type=float,default=0,
help='Lateral shift of training data.')
cmdParser.add_argument('--shiftformat',metavar='N',
type=int,default=0,
help='Format of longitudinal/lateral shift.\
0: As fraction of image. 1: Georeferenced units.')
cmdParser.add_argument('--top',metavar='N',
type=int,default=15,
help='Get N most frequent classes.')
cmdParser.add_argument('--key',
type=str,default='Descriptio',
help='Set parameter key for category in GIS file to classify data.')
cmdParser.add_argument('--epsg',metavar='N',
type=int,default=None,
help='EPSG format for GIS data. Is read from data if not set.')
cmdParser.add_argument('--layer',metavar='N',
type=int,default=None,
help='Number of layers to be trained on.')
cmdParser.add_argument('--mode',
type=str,default='gpu',
help='GPU (default) or CPU mode')
cmdParser.add_argument('--sat',
type=str,default=None,
help='Folder with training satellite images, if not in standard location.')
cmdParser.add_argument('--batchsize',metavar='N',
type=int,default=None,
help='Size of training batch (1-4)')
cmdParser.add_argument('--stepsize',metavar='N.N',
type=float,default=None,
help='Size of training step')
cmdParser.add_argument('--maxiter',metavar='N',
type=int,default=None,
help='Maximum iterations at training stage')
cmdParser.add_argument('--datatype',
type=str,default='PNG',
help='Datatype of training input data (PNG, LMDB, LMDB2 or HDF5)')
cmdParser.add_argument('--arg1',
type=str,default=None,
help='Argument 1 for script.')
cmdParser.add_argument('--arg2',
type=str,default=None,
help='Argument 2 for script.')
cmdParser.add_argument('--arg3',
type=str,default=None,
help='Argument 3 for script.')
cmdParser.add_argument('--arg4',
type=str,default=None,
help='Argument 4 for script.')
testParser = cmdParser.add_mutually_exclusive_group(required=False)
testParser.add_argument('--test', dest='test', action='store_true',help='Create test set.')
testParser.add_argument('--no-test', dest='test', action='store_false',help='Do not create test set (default)')
cmdParser.set_defaults(test=False)
backgroundParser = cmdParser.add_mutually_exclusive_group(required=False)
backgroundParser.add_argument('--background', dest='b', action='store_false',help='Classify background for training (default)')
backgroundParser.add_argument('--no-background', dest='b', action='store_true',help='Ignore background for training.')
cmdParser.set_defaults(b=False)
randomParser = cmdParser.add_mutually_exclusive_group(required=False)
randomParser.add_argument('--random', dest='randomImages', action='store_true',help='Use random images within GIS boundary box.')
randomParser.add_argument('--no-random', dest='randomImages', action='store_false',help='Only use images with features (default).')
cmdParser.set_defaults(randomImages=False)
weightParser = cmdParser.add_mutually_exclusive_group(required=False)
weightParser.add_argument('--weights', dest='initweights', action='store_true',help='Initialize weights according to frequency statistics (default).')
weightParser.add_argument('--no-weights', dest='initweights', action='store_false',help='Do not initialize weights.')
cmdParser.set_defaults(initweights=True)
compareParser = cmdParser.add_mutually_exclusive_group(required=False)
compareParser.add_argument('--compares', dest='compare', action='store_true',help='Compare classified results with labels')
compareParser.add_argument('--no-compares', dest='compare', action='store_false',help='Do not compare classified results with labels')
cmdParser.set_defaults(compare=True)
cmdArgs = vars(cmdParser.parse_args())
selectedModule = cmdArgs.get('module')
mapboxtoken = cmdArgs.get('mapbox_token')
inputFile = cmdArgs.get('i')
outputFolder = cmdArgs.get('o')
zoomLevel= cmdArgs.get('z')
datatype = cmdArgs.get('d')
satelliteCount = cmdArgs.get('c')
xpixel = cmdArgs.get('x')
ypixel = cmdArgs.get('y')
scriptFile = cmdArgs.get('s')
net = cmdArgs.get('n')
scriptArg1 = cmdArgs.get('arg1')
scriptArg2 = cmdArgs.get('arg2')
scriptArg3 = cmdArgs.get('arg3')
scriptArg4 = cmdArgs.get('arg4')
lonshift= cmdArgs.get('lonshift')
latshift= cmdArgs.get('latshift')
layernumber = cmdArgs.get('layer')
shiftformat = cmdArgs.get('shiftformat')
key = cmdArgs.get('key')
mode = cmdArgs.get('mode')
top = cmdArgs.get('top')
epsg = cmdArgs.get('epsg')
test = cmdArgs.get('test')
batchsize = cmdArgs.get('batchsize')
maxiter = cmdArgs.get('maxiter')
stepsize = cmdArgs.get('stepsize')
datatype = cmdArgs.get('datatype')
sat = cmdArgs.get('sat')
b = cmdArgs.get('b')
randomImages = cmdArgs.get('randomImages')
initweights = cmdArgs.get('initweights')
compare = cmdArgs.get('compare')
# Execute according to options
print "Option:",selectedModule
#only import caffe if needed
if selectedModule == 'all' or selectedModule=='train' or selectedModule=='ml':
import train,applyml
if selectedModule == 'all':
inputFile,stats,freq,elements=\
parse.parse(inputFile=inputFile,outputFolder=outputFolder,
scriptFile=scriptFile,datatype=datatype,top=top,layernumber=layernumber,
key=key,
scriptArg1=scriptArg1,scriptArg2=scriptArg2,
scriptArg3=scriptArg3,scriptArg4=scriptArg4)
get_satellite.get_satellite(inputFile=inputFile,
mapboxtoken=mapboxtoken,
count=satelliteCount,
zoomLevel=zoomLevel,
outputFolder=outputFolder,
epsg=epsg,
xpixel=xpixel,
ypixel=ypixel,
randomImages=randomImages,
elements=elements)
overlay.overlay(outputFolder,inputFile,
xpixel=xpixel,
ypixel=ypixel,
zoomLevel=zoomLevel,
|
myParse
|
identifier_name
|
|
cv4ag.py
|
modules to be loaded. OPTION: \n\
all - all modules (except clear).\n\
parse - input file parser.\n\
satellite - get satellite data.\n\
overlay - overlay classification with satellite data. \n\
train - train.\n\
ml - apply machine learning algorithm.\n\
clear - clear generated data from previous run on input file')
cmdParser.add_argument('mapbox_token',
metavar='MAPBOX_TOKEN',
type=str,default=False,nargs='?',
help='Mapbox token to download satellite images.')
cmdParser.add_argument('-i',
type=str,default=None,metavar='FILE',
help='Input file. Do not give if data obtained by script.')
cmdParser.add_argument('-s',metavar='FILE',
type=str,default=None,
help='Script file to obtain data')
cmdParser.add_argument('-o',metavar='PATH',
type=str,default="data/",
help='Output folder. Satellite data are put in and read from\
PATH/sat/.')
cmdParser.add_argument('-c',metavar='N',
type=int,default=1000,
help='Number of satellite images to download.')
cmdParser.add_argument('-z',metavar='N',
type=int,default=17,
help='Zoom level. Min=15, Max=19. See libs/satellite_resolutions.csv for resolutions.')
cmdParser.add_argument('-x',metavar='N',
type=int,default=480,
help='Images have width N pixel.')
cmdParser.add_argument('-y',metavar='N',
type=int,default=360,
help='Images have height N pixel.')
cmdParser.add_argument('-d',metavar='FILETYPE_CODE',
type=str,default=None,
help='Specify file type. Will try to detect the filetype automatically. \
Will not prompt for vector conversion if not given.\
See www.gdal.org/formats_list.html or\
www.gdal.org/ogr_formats.html \
(or libs/*_formats.csv) for FILETYPE_CODEs.')
cmdParser.add_argument('-n',metavar='N',
type=int,default=1,
help='Accuracy of neural net. 0: lowest. 3: highest.')
cmdParser.add_argument('--lonshift',metavar='N.N',
type=float,default=0,
help='Longitudinal shift of training data.')
cmdParser.add_argument('--latshift',metavar='N.N',
type=float,default=0,
help='Lateral shift of training data.')
cmdParser.add_argument('--shiftformat',metavar='N',
type=int,default=0,
help='Format of longitudinal/lateral shift.\
0: As fraction of image. 1: Georeferenced units.')
cmdParser.add_argument('--top',metavar='N',
type=int,default=15,
help='Get N most frequent classes.')
cmdParser.add_argument('--key',
type=str,default='Descriptio',
help='Set parameter key for category in GIS file to classify data.')
cmdParser.add_argument('--epsg',metavar='N',
type=int,default=None,
help='EPSG format for GIS data. Is read from data if not set.')
cmdParser.add_argument('--layer',metavar='N',
type=int,default=None,
help='Number of layers to be trained on.')
cmdParser.add_argument('--mode',
type=str,default='gpu',
help='GPU (default) or CPU mode')
cmdParser.add_argument('--sat',
type=str,default=None,
help='Folder with training satellite images, if not in standard location.')
cmdParser.add_argument('--batchsize',metavar='N',
type=int,default=None,
help='Size of training batch (1-4)')
cmdParser.add_argument('--stepsize',metavar='N.N',
type=float,default=None,
help='Size of training step')
cmdParser.add_argument('--maxiter',metavar='N',
type=int,default=None,
help='Maximum iterations at training stage')
cmdParser.add_argument('--datatype',
type=str,default='PNG',
help='Datatype of training input data (PNG, LMDB, LMDB2 or HDF5)')
cmdParser.add_argument('--arg1',
type=str,default=None,
help='Argument 1 for script.')
cmdParser.add_argument('--arg2',
type=str,default=None,
help='Argument 2 for script.')
cmdParser.add_argument('--arg3',
type=str,default=None,
help='Argument 3 for script.')
cmdParser.add_argument('--arg4',
type=str,default=None,
help='Argument 4 for script.')
testParser = cmdParser.add_mutually_exclusive_group(required=False)
testParser.add_argument('--test', dest='test', action='store_true',help='Create test set.')
testParser.add_argument('--no-test', dest='test', action='store_false',help='Do not create test set (default)')
|
cmdParser.set_defaults(b=False)
randomParser = cmdParser.add_mutually_exclusive_group(required=False)
randomParser.add_argument('--random', dest='randomImages', action='store_true',help='Use random images within GIS boundary box.')
randomParser.add_argument('--no-random', dest='randomImages', action='store_false',help='Only use images with features (default).')
cmdParser.set_defaults(randomImages=False)
weightParser = cmdParser.add_mutually_exclusive_group(required=False)
weightParser.add_argument('--weights', dest='initweights', action='store_true',help='Initialize weights according to frequency statistics (default).')
weightParser.add_argument('--no-weights', dest='initweights', action='store_false',help='Do not initialize weights.')
cmdParser.set_defaults(initweights=True)
compareParser = cmdParser.add_mutually_exclusive_group(required=False)
compareParser.add_argument('--compares', dest='compare', action='store_true',help='Compare classified results with labels')
compareParser.add_argument('--no-compares', dest='compare', action='store_false',help='Do not compare classified results with labels')
cmdParser.set_defaults(compare=True)
cmdArgs = vars(cmdParser.parse_args())
selectedModule = cmdArgs.get('module')
mapboxtoken = cmdArgs.get('mapbox_token')
inputFile = cmdArgs.get('i')
outputFolder = cmdArgs.get('o')
zoomLevel= cmdArgs.get('z')
datatype = cmdArgs.get('d')
satelliteCount = cmdArgs.get('c')
xpixel = cmdArgs.get('x')
ypixel = cmdArgs.get('y')
scriptFile = cmdArgs.get('s')
net = cmdArgs.get('n')
scriptArg1 = cmdArgs.get('arg1')
scriptArg2 = cmdArgs.get('arg2')
scriptArg3 = cmdArgs.get('arg3')
scriptArg4 = cmdArgs.get('arg4')
lonshift= cmdArgs.get('lonshift')
latshift= cmdArgs.get('latshift')
layernumber = cmdArgs.get('layer')
shiftformat = cmdArgs.get('shiftformat')
key = cmdArgs.get('key')
mode = cmdArgs.get('mode')
top = cmdArgs.get('top')
epsg = cmdArgs.get('epsg')
test = cmdArgs.get('test')
batchsize = cmdArgs.get('batchsize')
maxiter = cmdArgs.get('maxiter')
stepsize = cmdArgs.get('stepsize')
datatype = cmdArgs.get('datatype')
sat = cmdArgs.get('sat')
b = cmdArgs.get('b')
randomImages = cmdArgs.get('randomImages')
initweights = cmdArgs.get('initweights')
compare = cmdArgs.get('compare')
# Execute according to options
print "Option:",selectedModule
#only import caffe if needed
if selectedModule == 'all' or selectedModule=='train' or selectedModule=='ml':
import train,applyml
if selectedModule == 'all':
inputFile,stats,freq,elements=\
parse.parse(inputFile=inputFile,outputFolder=outputFolder,
scriptFile=scriptFile,datatype=datatype,top=top,layernumber=layernumber,
key=key,
scriptArg1=scriptArg1,scriptArg2=scriptArg2,
scriptArg3=scriptArg3,scriptArg4=scriptArg4)
get_satellite.get_satellite(inputFile=inputFile,
mapboxtoken=mapboxtoken,
count=satelliteCount,
zoomLevel=zoomLevel,
outputFolder=outputFolder,
epsg=epsg,
xpixel=xpixel,
ypixel=ypixel,
randomImages=randomImages,
elements=elements)
overlay.overlay(outputFolder,inputFile,
xpixel=xpixel,
ypixel=ypixel,
zoomLevel=zoomLevel,
lonshift=lonshift,latshift=latshift,
shiftformat=shiftformat,
top=top,
stats=stats,
count=satelliteCount,
epsg=epsg,
key=key,
sat=sat,
randomImages=randomImages,
elements=elements\
)
train.train(outputFolder=outputFolder,
inputFile=inputFile,
net=net,
top
|
cmdParser.set_defaults(test=False)
backgroundParser = cmdParser.add_mutually_exclusive_group(required=False)
backgroundParser.add_argument('--background', dest='b', action='store_false',help='Classify background for training (default)')
backgroundParser.add_argument('--no-background', dest='b', action='store_true',help='Ignore background for training.')
|
random_line_split
|
cv4ag.py
|
cmdParser = myParse(\
description='Machine Learning Framework for Agricultural Data.',
add_help=True)
cmdParser.add_argument('module',
metavar='OPTION',
type=str,default=False,
help='The modules to be loaded. OPTION: \n\
all - all modules (except clear).\n\
parse - input file parser.\n\
satellite - get satellite data.\n\
overlay - overlay classification with satellite data. \n\
train - train.\n\
ml - apply machine learning algorithm.\n\
clear - clear generated data from previous run on input file')
cmdParser.add_argument('mapbox_token',
metavar='MAPBOX_TOKEN',
type=str,default=False,nargs='?',
help='Mapbox token to download satellite images.')
cmdParser.add_argument('-i',
type=str,default=None,metavar='FILE',
help='Input file. Do not give if data obtained by script.')
cmdParser.add_argument('-s',metavar='FILE',
type=str,default=None,
help='Script file to obtain data')
cmdParser.add_argument('-o',metavar='PATH',
type=str,default="data/",
help='Output folder. Satellite data are put in and read from\
PATH/sat/.')
cmdParser.add_argument('-c',metavar='N',
type=int,default=1000,
help='Number of satellite images to download.')
cmdParser.add_argument('-z',metavar='N',
type=int,default=17,
help='Zoom level. Min=15, Max=19. See libs/satellite_resolutions.csv for resolutions.')
cmdParser.add_argument('-x',metavar='N',
type=int,default=480,
help='Images have width N pixel.')
cmdParser.add_argument('-y',metavar='N',
type=int,default=360,
help='Images have height N pixel.')
cmdParser.add_argument('-d',metavar='FILETYPE_CODE',
type=str,default=None,
help='Specify file type. Will try to detect the filetype automatically. \
Will not prompt for vector conversion if not given.\
See www.gdal.org/formats_list.html or\
www.gdal.org/ogr_formats.html \
(or libs/*_formats.csv) for FILETYPE_CODEs.')
cmdParser.add_argument('-n',metavar='N',
type=int,default=1,
help='Accuracy of neural net. 0: lowest. 3: highest.')
cmdParser.add_argument('--lonshift',metavar='N.N',
type=float,default=0,
help='Longitudinal shift of training data.')
cmdParser.add_argument('--latshift',metavar='N.N',
type=float,default=0,
help='Lateral shift of training data.')
cmdParser.add_argument('--shiftformat',metavar='N',
type=int,default=0,
help='Format of longitudinal/lateral shift.\
0: As fraction of image. 1: Georeferenced units.')
cmdParser.add_argument('--top',metavar='N',
type=int,default=15,
help='Get N most frequent classes.')
cmdParser.add_argument('--key',
type=str,default='Descriptio',
help='Set parameter key for category in GIS file to classify data.')
cmdParser.add_argument('--epsg',metavar='N',
type=int,default=None,
help='EPSG format for GIS data. Is read from data if not set.')
cmdParser.add_argument('--layer',metavar='N',
type=int,default=None,
help='Number of layers to be trained on.')
cmdParser.add_argument('--mode',
type=str,default='gpu',
help='GPU (default) or CPU mode')
cmdParser.add_argument('--sat',
type=str,default=None,
help='Folder with training satellite images, if not in standard location.')
cmdParser.add_argument('--batchsize',metavar='N',
type=int,default=None,
help='Size of training batch (1-4)')
cmdParser.add_argument('--stepsize',metavar='N.N',
type=float,default=None,
help='Size of training step')
cmdParser.add_argument('--maxiter',metavar='N',
type=int,default=None,
help='Maximum iterations at training stage')
cmdParser.add_argument('--datatype',
type=str,default='PNG',
help='Datatype of training input data (PNG, LMDB, LMDB2 or HDF5)')
cmdParser.add_argument('--arg1',
type=str,default=None,
help='Argument 1 for script.')
cmdParser.add_argument('--arg2',
type=str,default=None,
help='Argument 2 for script.')
cmdParser.add_argument('--arg3',
type=str,default=None,
help='Argument 3 for script.')
cmdParser.add_argument('--arg4',
type=str,default=None,
help='Argument 4 for script.')
testParser = cmdParser.add_mutually_exclusive_group(required=False)
testParser.add_argument('--test', dest='test', action='store_true',help='Create test set.')
testParser.add_argument('--no-test', dest='test', action='store_false',help='Do not create test set (default)')
cmdParser.set_defaults(test=False)
backgroundParser = cmdParser.add_mutually_exclusive_group(required=False)
backgroundParser.add_argument('--background', dest='b', action='store_false',help='Classify background for training (default)')
backgroundParser.add_argument('--no-background', dest='b', action='store_true',help='Ignore background for training.')
cmdParser.set_defaults(b=False)
randomParser = cmdParser.add_mutually_exclusive_group(required=False)
randomParser.add_argument('--random', dest='randomImages', action='store_true',help='Use random images within GIS boundary box.')
randomParser.add_argument('--no-random', dest='randomImages', action='store_false',help='Only use images with features (default).')
cmdParser.set_defaults(randomImages=False)
weightParser = cmdParser.add_mutually_exclusive_group(required=False)
weightParser.add_argument('--weights', dest='initweights', action='store_true',help='Initialize weights according to frequency statistics (default).')
weightParser.add_argument('--no-weights', dest='initweights', action='store_false',help='Do not initialize weights.')
cmdParser.set_defaults(initweights=True)
compareParser = cmdParser.add_mutually_exclusive_group(required=False)
compareParser.add_argument('--compares', dest='compare', action='store_true',help='Compare classified results with labels')
compareParser.add_argument('--no-compares', dest='compare', action='store_false',help='Do not compare classified results with labels')
cmdParser.set_defaults(compare=True)
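# A minimal sketch (illustrative, not part of the original tool) of how the
# paired --flag/--no-flag groups above behave: both options write to a single
# dest, the mutually exclusive group rejects passing both flags at once, and
# set_defaults supplies the fallback when neither is given. For example:
#
#   demo = argparse.ArgumentParser()
#   grp = demo.add_mutually_exclusive_group(required=False)
#   grp.add_argument('--weights', dest='initweights', action='store_true')
#   grp.add_argument('--no-weights', dest='initweights', action='store_false')
#   demo.set_defaults(initweights=True)
#   assert demo.parse_args([]).initweights is True
#   assert demo.parse_args(['--no-weights']).initweights is False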
cmdArgs = vars(cmdParser.parse_args())
selectedModule = cmdArgs.get('module')
mapboxtoken = cmdArgs.get('mapbox_token')
inputFile = cmdArgs.get('i')
outputFolder = cmdArgs.get('o')
zoomLevel = cmdArgs.get('z')
datatype = cmdArgs.get('d')
satelliteCount = cmdArgs.get('c')
xpixel = cmdArgs.get('x')
ypixel = cmdArgs.get('y')
scriptFile = cmdArgs.get('s')
net = cmdArgs.get('n')
scriptArg1 = cmdArgs.get('arg1')
scriptArg2 = cmdArgs.get('arg2')
scriptArg3 = cmdArgs.get('arg3')
scriptArg4 = cmdArgs.get('arg4')
lonshift = cmdArgs.get('lonshift')
latshift = cmdArgs.get('latshift')
layernumber = cmdArgs.get('layer')
shiftformat = cmdArgs.get('shiftformat')
key = cmdArgs.get('key')
mode = cmdArgs.get('mode')
top = cmdArgs.get('top')
epsg = cmdArgs.get('epsg')
test = cmdArgs.get('test')
batchsize = cmdArgs.get('batchsize')
maxiter = cmdArgs.get('maxiter')
stepsize = cmdArgs.get('stepsize')
# NOTE: this overwrites the -d filetype value read into datatype above
datatype = cmdArgs.get('datatype')
sat = cmdArgs.get('sat')
b = cmdArgs.get('b')
randomImages = cmdArgs.get('randomImages')
initweights = cmdArgs.get('initweights')
compare = cmdArgs.get('compare')
# Execute according to options
print "Option:",selectedModule
#only import caffe if needed
if selectedModule == 'all' or selectedModule=='train' or selectedModule=='ml':
import train,applyml
if selectedModule == 'all':
inputFile,stats,freq,elements=\
parse.parse(inputFile=inputFile,outputFolder=outputFolder,
scriptFile=scriptFile,datatype=datatype,top=top,layernumber=layernumber,
key=key,
scriptArg1=scriptArg1,scriptArg2=scriptArg2,
scriptArg3=scriptArg3,scriptArg4=scriptArg4)
get_satellite.get_satellite(inputFile=inputFile,
mapboxtoken=mapboxtoken,
count=satelliteCount,
zoomLevel=zoomLevel,
outputFolder=outputFolder,
epsg=epsg,
xpixel=xpixel,
ypixel=ypixel,
randomImages=randomImages,
elements=elements)
overlay.overlay(outputFolder,inputFile,
xpixel=xpixel,
ypixel=ypixel,
zoomLevel=zoomLevel,
lonshift=lonshift,latshift=latshift,
|
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
|
identifier_body
|
|
movie_review_NaiveBayes.py
|
pos/', 0)
#test_neg = get_files('../datasets/movie_reviews/data/subset/test/neg/', 1)
print('* TEST DATA * ')
print('# positive reviews: ', len(test_pos))
print('# negative reviews: ', len(test_neg))
test_data = test_pos + test_neg
print('# total reviews: ', len(test_data))
# We do not want a 50/50 split between training and test,
# so we create one big set of data that will later be split 80/20 into train and test data
# a = train_data[::2]
# b = train_data[1::2]
# c = test_data[::2]
# d = test_data[1::2]
# all_reviews = a + b + c + d'
all_r = train_data + test_data
shuffle(all_r)
# In[ ]:
all_reviews = all_r
# In[ ]:
stopwords = []
try:
with open('../datasets/stopwords.txt', 'r') as f:
stopwords = f.read().split(',')
except IOError:
print('Problem opening file')
# In[ ]:
# * * * PREPROCESSING * * *
stemmer = PorterStemmer()
preprocessed_reviews = []
for t in all_reviews:
#print(len(preprocessed_reviews))
review = t[0]
review_type = t[1]
# Remove whitespace and punctuation
text = re.sub('[' + string.punctuation + ']', ' ', review)
text = re.sub('[\n\t\r]', '', text)
# Split words into list
words = text.split()
new = []
# Remove stopwords and stem remaining words
for word in words:
stemmed_word = stemmer.stem(word.lower())
if stemmed_word not in stopwords and len(stemmed_word) > 2:
new.append(stemmed_word)
# Add to preprocessed list
preprocessed_reviews.append((new, review_type))
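# Illustrative walk-through of the loop above (assuming NLTK's PorterStemmer
# and that 'it' is not in the stopword list): 'Loved it, amazing!' becomes
# ['love', 'amaz'] - punctuation is stripped, words are lower-cased, 'it' is
# dropped by the len > 2 check, and the survivors are stemmed.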
# In[ ]:
count = 0
for r in preprocessed_reviews:
words = r[0]
for w in words:
if w in stopwords:
count += 1
a = 191569
count
# In[ ]:
# Splitting data into training data and test data (80-20 ratio)
total = len(preprocessed_reviews) #Total number of reviews
test_number = int(0.20 * total) # Number of testing reviews
# Picking randomly
print(test_number)
copy = preprocessed_reviews[:]
test_set = []
taken = {}
while len(test_set) < test_number:
#print(len(train_texts))
num = random.randint(0, test_number - 1)
if num not in taken.keys():
test_set.append(copy.pop(num))
taken[num] = 1
train_set = copy[:] # Trainset is the remaining reviews
len(train_set)/total, len(test_set)/total, len(train_set), len(test_set)
# In[ ]:
# * * * TRAINING THE MODEL * * *
# meaning: Computing probabilities needed for P(Positive|Word)
def total_goods_and_bads(tset):
goods = 0
bads = 0
for t in tset:
goods += 1 if t[1] == 0 else 0
bads += 1 if t[1] == 1 else 0
return goods, bads
total_positive = total_goods_and_bads(train_set)[0]
total_negative = total_goods_and_bads(train_set)[1]
print(total_positive)
print(total_negative)
# In[ ]:
# First making a word counter for pos and neg reviews
pos_word_counter = {}
neg_word_counter = {}
total_words = 0
for t in train_set:
review = t[0]
review_type = t[1]
already_counted = []
for word in review:
total_words += 1
if review_type == 0:
if word not in pos_word_counter:
pos_word_counter[word] = 1
else:
if word not in already_counted:
pos_word_counter[word] += 1
else:
if word not in neg_word_counter:
neg_word_counter[word] = 1
else:
|
already_counted.append(word)
total_words
# In[ ]:
# Removes words that are not included in at least 0.15% of the reviews
removed_words = 0
for j in range(len(train_set)):
words = train_set[j][0]
i = 0
# i only advances when no word was removed, so removal never skips an element
while i < len(words):
word = words[i]
word_removed = False
if word in pos_word_counter:
if pos_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
word_removed = True
removed_words += 1
elif word in neg_word_counter:
if neg_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
word_removed = True
removed_words += 1
if not word_removed:
i += 1
removed_words
# In[ ]:
def sort_dict(dicti, end):
# Sort the dict by value, giving a list of tuples
most_common_words = sorted(dicti.items(), key = lambda kv: kv[1])
most_common_words.reverse()
most_common_words = most_common_words[:end]
# Build a dict of the form {word: count, ...}
# We want a dict rather than a list of tuples, because of later lookups
return dict(most_common_words)
most_used_words_pos = sort_dict(pos_word_counter, 25)
most_used_words_neg = sort_dict(neg_word_counter, 25)
most_used_words_pos
# In[ ]:
# Need these 4 probabilities
# 1) Probability that a word appears in positive reviews
# 2) Probability that a word appears in negative reviews
# 3) Overall probability that any given review is positive
# 4) Overall probability that any given reviews is negative
# # Making a dictionary with probabilities for different words appearing in good and bad reviews
# # Example: {'bad': (0.0881, 0.3226)}
probability_appearing = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probability_appearing:
if word in pos_word_counter:
p_appearing_good = pos_word_counter[word]/total_positive
else:
p_appearing_good = 0.1
if word in neg_word_counter:
p_appearing_bad = neg_word_counter[word]/total_negative
else:
p_appearing_bad = 0.1
probability_appearing[word] = (p_appearing_good, p_appearing_bad)
p_pos = total_positive/len(train_set)
p_neg = total_negative/len(train_set)
print(p_pos)
print(p_neg)
# Finally we can compute P(Positive | Word)
def p_is_positive_given_word(word):
return (probability_appearing[word][0]*p_pos)/((probability_appearing[word][0]*p_pos + probability_appearing[word][1]*p_neg))
def p_is_negative_given_word(word):
return (probability_appearing[word][1]*p_neg)/((probability_appearing[word][1]*p_neg + probability_appearing[word][0]*p_pos))
p_is_positive_given_word('bad'), p_is_negative_given_word('bad')
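# Worked example with the illustrative numbers from the comment above:
# given P(word|pos) = 0.0881, P(word|neg) = 0.3226 and equal priors
# p_pos = p_neg = 0.5, Bayes' rule gives
# P(pos|'bad') = 0.0881*0.5 / (0.0881*0.5 + 0.3226*0.5) ~= 0.2145,
# so a review containing 'bad' is far more likely to be negative.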
# In[ ]:
probabilities = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probabilities:
# use fresh names here so we do not clobber the global priors p_pos/p_neg
# that p_is_positive_given_word/p_is_negative_given_word rely on
prob_pos = p_is_positive_given_word(word)
prob_neg = p_is_negative_given_word(word)
if prob_pos == 0:
prob_pos = 0.1 # tweaking this value
if prob_pos == 1:
prob_pos = 0.98
if prob_neg == 0:
prob_neg = 0.1
if prob_neg == 1:
prob_neg = 0.98
probabilities[word] = (prob_pos, prob_neg)
# In[ ]:
# Filter out words that are not informative (probabilities between 0.40 and 0.60)
print(len(probabilities))
for word in list(probabilities):
probs = probabilities[word]
if 0.40 < probs[0] and probs[0] < 0.60 and 0.40 < probs[1] and probs[1] < 0.60:
del probabilities[word]
print(len(probabilities))
# In[ ]:
probabilities
# In[ ]:
# COMBINING INDIVIDUAL PROBABILITIES
# Determining whether a review is positive or negative based only on the presence of one word
# is error-prone; we must consider all the words (or the most interesting ones) in the review
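# A minimal sketch (an assumption, not necessarily the original cell) of one
# standard way to combine the per-word probabilities, Graham-style:
# P = prod(p_i) / (prod(p_i) + prod(1 - p_i))
#
#   from functools import reduce
#   def combine(probs):
#       num = reduce(lambda a, b: a * b, probs, 1.0)
#       den = num + reduce(lambda a, b: a * b, [1.0 - p for p in probs], 1.0)
#       return num / den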
from functools import reduce
def p_is_type(words):
words = list(filter(lambda x: x in probabilities, words)) # Filter out words not seen during the training phase
pos_probs = []
neg_probs = []
for word in words:
pos_probs.append(probabilities[word][
|
if word not in already_counted:
neg_word_counter[word] += 1
|
conditional_block
|
movie_review_NaiveBayes.py
|
pos/', 0)
#test_neg = get_files('../datasets/movie_reviews/data/subset/test/neg/', 1)
print('* TEST DATA * ')
print('# positive reviews: ', len(test_pos))
print('# negative reviews: ', len(test_neg))
test_data = test_pos + test_neg
print('# total reviews: ', len(test_data))
# We do not want a 50/50 split between training and test,
# so we create one big set of data that will later be split 80/20 into train and test data
# a = train_data[::2]
# b = train_data[1::2]
# c = test_data[::2]
# d = test_data[1::2]
# all_reviews = a + b + c + d'
all_r = train_data + test_data
shuffle(all_r)
# In[ ]:
all_reviews = all_r
# In[ ]:
stopwords = []
try:
with open('../datasets/stopwords.txt', 'r') as f:
stopwords = f.read().split(',')
except IOError:
print('Problem opening file')
# In[ ]:
# * * * PREPROCESSING * * *
stemmer = PorterStemmer()
preprocessed_reviews = []
for t in all_reviews:
#print(len(preprocessed_reviews))
review = t[0]
review_type = t[1]
# Remove whitespace and punctuation
text = re.sub('[' + string.punctuation + ']', ' ', review)
text = re.sub('[\n\t\r]', '', text)
# Split words into list
words = text.split()
new = []
# Remove stopwords and stem remaining words
for word in words:
stemmed_word = stemmer.stem(word.lower())
if stemmed_word not in stopwords and len(stemmed_word) > 2:
new.append(stemmed_word)
# Add to preprocessed list
preprocessed_reviews.append((new, review_type))
# In[ ]:
count = 0
for r in preprocessed_reviews:
words = r[0]
for w in words:
if w in stopwords:
count += 1
a = 191569
count
# In[ ]:
# Splitting data into training data and test data (80-20 ratio)
total = len(preprocessed_reviews) #Total number of reviews
test_number = int(0.20 * total) # Number of testing reviews
# Picking randomly
print(test_number)
copy = preprocessed_reviews[:]
test_set = []
taken = {}
while len(test_set) < test_number:
#print(len(train_texts))
num = random.randint(0, test_number - 1)
if num not in taken.keys():
test_set.append(copy.pop(num))
taken[num] = 1
train_set = copy[:] # Trainset is the remaining reviews
len(train_set)/total, len(test_set)/total, len(train_set), len(test_set)
# In[ ]:
# * * * TRAINING THE MODEL * * *
# meaning: Computing probabilities needed for P(Positive|Word)
def total_goods_and_bads(tset):
goods = 0
bads = 0
for t in tset:
goods += 1 if t[1] == 0 else 0
bads += 1 if t[1] == 1 else 0
return goods, bads
total_positive = total_goods_and_bads(train_set)[0]
total_negative = total_goods_and_bads(train_set)[1]
print(total_positive)
print(total_negative)
# In[ ]:
# First making a word counter for pos and neg reviews
pos_word_counter = {}
neg_word_counter = {}
total_words = 0
for t in train_set:
review = t[0]
review_type = t[1]
already_counted = []
for word in review:
total_words += 1
if review_type == 0:
if word not in pos_word_counter:
pos_word_counter[word] = 1
else:
if word not in already_counted:
pos_word_counter[word] += 1
else:
if word not in neg_word_counter:
neg_word_counter[word] = 1
else:
if word not in already_counted:
neg_word_counter[word] += 1
already_counted.append(word)
total_words
# In[ ]:
# Removes words that are not included in at least 0.15% of the reviews
removed_words = 0
for j in range(len(train_set)):
words = train_set[j][0]
i = 0
while i < len(words):
word = words[i]
word_removed = False
if word in pos_word_counter:
if pos_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
word_removed = True
removed_words += 1
elif word in neg_word_counter:
if neg_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
word_removed = True
removed_words += 1
if not word_removed:
i += 1
removed_words
# In[ ]:
def sort_dict(dicti, end):
# Sort the dict by value, giving a list of tuples
most_common_words = sorted(dicti.items(), key = lambda kv: kv[1])
most_common_words.reverse()
most_common_words = most_common_words[:end]
# Build a dict of the form {word: count, ...}
# We want a dict rather than a list of tuples, because of later lookups
return dict(most_common_words)
most_used_words_pos = sort_dict(pos_word_counter, 25)
most_used_words_neg = sort_dict(neg_word_counter, 25)
most_used_words_pos
# In[ ]:
# Need these 4 probabilities
# 1) Probability that a word appears in positive reviews
# 2) Probability that a word appears in negative reviews
# 3) Overall probability that any given review is positive
# 4) Overall probability that any given reviews is negative
# # Making a dictionary with probabilities for different words appearing in good and bad reviews
# # Example: {'bad': (0.0881, 0.3226)}
probability_appearing = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probability_appearing:
if word in pos_word_counter:
p_appearing_good = pos_word_counter[word]/total_positive
else:
p_appearing_good = 0.1
if word in neg_word_counter:
p_appearing_bad = neg_word_counter[word]/total_negative
else:
p_appearing_bad = 0.1
probability_appearing[word] = (p_appearing_good, p_appearing_bad)
p_pos = total_positive/len(train_set)
p_neg = total_negative/len(train_set)
print(p_pos)
print(p_neg)
# Finally we can compute P(Positive | Word)
def p_
|
ord):
return (probability_appearing[word][0]*p_pos)/((probability_appearing[word][0]*p_pos + probability_appearing[word][1]*p_neg))
def p_is_negative_given_word(word):
return (probability_appearing[word][1]*p_neg)/((probability_appearing[word][1]*p_neg + probability_appearing[word][0]*p_pos))
p_is_positive_given_word('bad'), p_is_negative_given_word('bad')
# In[ ]:
probabilities = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probabilities:
# use fresh names here so we do not clobber the global priors p_pos/p_neg
# that p_is_positive_given_word/p_is_negative_given_word rely on
prob_pos = p_is_positive_given_word(word)
prob_neg = p_is_negative_given_word(word)
if prob_pos == 0:
prob_pos = 0.1 # tweaking this value
if prob_pos == 1:
prob_pos = 0.98
if prob_neg == 0:
prob_neg = 0.1
if prob_neg == 1:
prob_neg = 0.98
probabilities[word] = (prob_pos, prob_neg)
# In[ ]:
# Filter out words that are not informative (probabilities between 0.40 and 0.60)
print(len(probabilities))
for word in list(probabilities):
probs = probabilities[word]
if 0.40 < probs[0] and probs[0] < 0.60 and 0.40 < probs[1] and probs[1] < 0.60:
del probabilities[word]
print(len(probabilities))
# In[ ]:
probabilities
# In[ ]:
# COMBINING INDIVIDUAL PROBABILITIES
# Determining whether a review is positive or negative based only on the presence of one word
# is error-prone; we must consider all the words (or the most interesting ones) in the review
from functools import reduce
def p_is_type(words):
words = list(filter(lambda x: x in probabilities, words)) # Filter out words not seen during the training phase
pos_probs = []
neg_probs = []
for word in words:
pos_probs.append(probabilities
|
is_positive_given_word(w
|
identifier_name
|
movie_review_NaiveBayes.py
|
pos/', 0)
#test_neg = get_files('../datasets/movie_reviews/data/subset/test/neg/', 1)
print('* TEST DATA * ')
print('# positive reviews: ', len(test_pos))
print('# negative reviews: ', len(test_neg))
test_data = test_pos + test_neg
print('# total reviews: ', len(test_data))
# We do not want a 50/50 split between training and test,
# so we create one big set of data that will later be split 80/20 into train and test data
# a = train_data[::2]
# b = train_data[1::2]
# c = test_data[::2]
# d = test_data[1::2]
# all_reviews = a + b + c + d'
all_r = train_data + test_data
shuffle(all_r)
# In[ ]:
all_reviews = all_r
# In[ ]:
stopwords = []
try:
with open('../datasets/stopwords.txt', 'r') as f:
stopwords = f.read().split(',')
except IOError:
print('Problem opening file')
# In[ ]:
# * * * PREPROCESSING * * *
stemmer = PorterStemmer()
preprocessed_reviews = []
for t in all_reviews:
#print(len(preprocessed_reviews))
review = t[0]
review_type = t[1]
# Remove whitespace and punctuation
text = re.sub('[' + string.punctuation + ']', ' ', review)
text = re.sub('[\n\t\r]', '', text)
# Split words into list
words = text.split()
new = []
# Remove stopwords and stem remaining words
for word in words:
stemmed_word = stemmer.stem(word.lower())
if stemmed_word not in stopwords and len(stemmed_word) > 2:
new.append(stemmed_word)
# Add to preprocessed list
preprocessed_reviews.append((new, review_type))
# In[ ]:
count = 0
for r in preprocessed_reviews:
words = r[0]
for w in words:
if w in stopwords:
count += 1
a = 191569
count
# In[ ]:
# Splitting data into training data and test data (80-20 ratio)
total = len(preprocessed_reviews) #Total number of reviews
test_number = int(0.20 * total) # Number of testing reviews
# Picking randomly
print(test_number)
copy = preprocessed_reviews[:]
test_set = []
taken = {}
while len(test_set) < test_number:
#print(len(train_texts))
num = random.randint(0, test_number - 1)
if num not in taken.keys():
test_set.append(copy.pop(num))
taken[num] = 1
train_set = copy[:] # Trainset is the remaining reviews
len(train_set)/total, len(test_set)/total, len(train_set), len(test_set)
# In[ ]:
# * * * TRAINING THE MODEL * * *
# meaning: Computing probabilities needed for P(Positive|Word)
def total_goods_and_bads(tset):
goods = 0
bads = 0
for t in tset:
goods += 1 if t[1] == 0 else 0
bads += 1 if t[1] == 1 else 0
return goods, bads
total_positive = total_goods_and_bads(train_set)[0]
total_negative = total_goods_and_bads(train_set)[1]
print(total_positive)
print(total_negative)
# In[ ]:
# First making a word counter for pos and neg reviews
pos_word_counter = {}
neg_word_counter = {}
total_words = 0
for t in train_set:
review = t[0]
review_type = t[1]
already_counted = []
for word in review:
total_words += 1
if review_type == 0:
if word not in pos_word_counter:
pos_word_counter[word] = 1
else:
if word not in already_counted:
pos_word_counter[word] += 1
else:
if word not in neg_word_counter:
neg_word_counter[word] = 1
else:
if word not in already_counted:
neg_word_counter[word] += 1
already_counted.append(word)
total_words
# In[ ]:
# Removes words that are not included in at least 0.15% of the reviews
removed_words = 0
for j in range(len(train_set)):
words = train_set[j][0]
i = 0
while i < len(words):
word = words[i]
word_removed = False
if word in pos_word_counter:
if pos_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
word_removed = True
removed_words += 1
elif word in neg_word_counter:
if neg_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
word_removed = True
removed_words += 1
if not word_removed:
i += 1
removed_words
# In[ ]:
def sort_dict(dicti, end):
# Sort the dict by value, giving a list of tuples
|
most_used_words_pos = sort_dict(pos_word_counter, 25)
most_used_words_neg = sort_dict(neg_word_counter, 25)
most_used_words_pos
# In[ ]:
# Need these 4 probabilities
# 1) Probability that a word appears in positive reviews
# 2) Probability that a word appears in negative reviews
# 3) Overall probability that any given review is positive
# 4) Overall probability that any given reviews is negative
# # Making a dictionary with probabilities for different words appearing in good and bad reviews
# # Example: {'bad': (0.0881, 0.3226)}
probability_appearing = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probability_appearing:
if word in pos_word_counter:
p_appearing_good = pos_word_counter[word]/total_positive
else:
p_appearing_good = 0.1
if word in neg_word_counter:
p_appearing_bad = neg_word_counter[word]/total_negative
else:
p_appearing_bad = 0.1
probability_appearing[word] = (p_appearing_good, p_appearing_bad)
p_pos = total_positive/len(train_set)
p_neg = total_negative/len(train_set)
print(p_pos)
print(p_neg)
# Finally we can compute P(Positive | Word)
def p_is_positive_given_word(word):
return (probability_appearing[word][0]*p_pos)/((probability_appearing[word][0]*p_pos + probability_appearing[word][1]*p_neg))
def p_is_negative_given_word(word):
return (probability_appearing[word][1]*p_neg)/((probability_appearing[word][1]*p_neg + probability_appearing[word][0]*p_pos))
p_is_positive_given_word('bad'), p_is_negative_given_word('bad')
# In[ ]:
probabilities = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probabilities:
# use fresh names here so we do not clobber the global priors p_pos/p_neg
# that p_is_positive_given_word/p_is_negative_given_word rely on
prob_pos = p_is_positive_given_word(word)
prob_neg = p_is_negative_given_word(word)
if prob_pos == 0:
prob_pos = 0.1 # tweaking this value
if prob_pos == 1:
prob_pos = 0.98
if prob_neg == 0:
prob_neg = 0.1
if prob_neg == 1:
prob_neg = 0.98
probabilities[word] = (prob_pos, prob_neg)
# In[ ]:
# Filter out words that are not informative (probabilities between 0.40 and 0.60)
print(len(probabilities))
for word in list(probabilities):
probs = probabilities[word]
if 0.40 < probs[0] and probs[0] < 0.60 and 0.40 < probs[1] and probs[1] < 0.60:
del probabilities[word]
print(len(probabilities))
# In[ ]:
probabilities
# In[ ]:
# COMBINING INDIVIDUAL PROBABILITIES
# Determining whether a review is positive or negative based only on the presence of one word
# is error-prone; we must consider all the words (or the most interesting ones) in the review
from functools import reduce
def p_is_type(words):
words = list(filter(lambda x: x in probabilities, words)) # Filter out words not seen during the training phase
pos_probs = []
neg_probs = []
for word in words:
pos_probs.append(probabilities
|
most_common_words = sorted(dicti.items(), key = lambda kv: kv[1])
most_common_words.reverse()
most_common_words = most_common_words[:end]
# Build a dict of the form {word: count, ...}
# We want a dict rather than a list of tuples, because of later lookups
return dict(most_common_words)
|
identifier_body
|
movie_review_NaiveBayes.py
|
/pos/', 0)
#test_neg = get_files('../datasets/movie_reviews/data/subset/test/neg/', 1)
print('* TEST DATA * ')
print('# positive reviews: ', len(test_pos))
print('# negative reviews: ', len(test_neg))
test_data = test_pos + test_neg
print('# total reviews: ', len(test_data))
# We do not want a 50/50 split between training and test,
# so we create one big set of data that will later be split 80/20 into train and test data
# a = train_data[::2]
# b = train_data[1::2]
# c = test_data[::2]
# d = test_data[1::2]
# all_reviews = a + b + c + d'
all_r = train_data + test_data
shuffle(all_r)
# In[ ]:
all_reviews = all_r
# In[ ]:
stopwords = []
try:
with open('../datasets/stopwords.txt', 'r') as f:
stopwords = f.read().split(',')
except IOError:
print('Problem opening file')
# In[ ]:
# * * * PREPROCESSING * * *
stemmer = PorterStemmer()
preprocessed_reviews = []
for t in all_reviews:
#print(len(preprocessed_reviews))
review = t[0]
review_type = t[1]
# Remove whitespace and punctuation
text = re.sub('[' + string.punctuation + ']', ' ', review)
text = re.sub('[\n\t\r]', '', text)
# Split words into list
words = text.split()
new = []
# Remove stopwords and stem remaining words
for word in words:
stemmed_word = stemmer.stem(word.lower())
if stemmed_word not in stopwords and len(stemmed_word) > 2:
new.append(stemmed_word)
# Add to preprocessed list
preprocessed_reviews.append((new, review_type))
# In[ ]:
count = 0
for r in preprocessed_reviews:
words = r[0]
for w in words:
if w in stopwords:
count += 1
a = 191569
count
# In[ ]:
# Splitting data into training data and test data (80-20 ratio)
total = len(preprocessed_reviews) #Total number of reviews
test_number = int(0.20 * total) # Number of testing reviews
# Picking randomly
print(test_number)
copy = preprocessed_reviews[:]
test_set = []
taken = {}
while len(test_set) < test_number:
#print(len(train_texts))
num = random.randint(0, test_number - 1)
if num not in taken.keys():
test_set.append(copy.pop(num))
taken[num] = 1
train_set = copy[:] # Trainset is the remaining reviews
len(train_set)/total, len(test_set)/total, len(train_set), len(test_set)
# In[ ]:
# * * * TRAINING THE MODEL * * *
# meaning: Computing probabilities needed for P(Positive|Word)
def total_goods_and_bads(tset):
goods = 0
bads = 0
for t in tset:
goods += 1 if t[1] == 0 else 0
bads += 1 if t[1] == 1 else 0
return goods, bads
total_positive = total_goods_and_bads(train_set)[0]
total_negative = total_goods_and_bads(train_set)[1]
print(total_positive)
print(total_negative)
# In[ ]:
# First making a word counter for pos and neg reviews
pos_word_counter = {}
neg_word_counter = {}
total_words = 0
for t in train_set:
review = t[0]
review_type = t[1]
already_counted = []
for word in review:
total_words += 1
if review_type == 0:
if word not in pos_word_counter:
pos_word_counter[word] = 1
else:
if word not in already_counted:
pos_word_counter[word] += 1
else:
if word not in neg_word_counter:
neg_word_counter[word] = 1
else:
if word not in already_counted:
neg_word_counter[word] += 1
already_counted.append(word)
total_words
# In[ ]:
# Removes words that are not included in at least 0.15% of the reviews
removed_words = 0
for j in range(len(train_set)):
words = train_set[j][0]
i = 0
while i < len(words):
word = words[i]
word_removed = False
if word in pos_word_counter:
if pos_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
word_removed = True
removed_words += 1
elif word in neg_word_counter:
if neg_word_counter[word] < 0.0015*len(train_set):
train_set[j][0].remove(word)
|
word_removed = True
removed_words += 1
if not word_removed:
i += 1
removed_words
# In[ ]:
def sort_dict(dicti, end):
# Sort the dict by value, giving a list of tuples
most_common_words = sorted(dicti.items(), key = lambda kv: kv[1])
most_common_words.reverse()
most_common_words = most_common_words[:end]
# Build a dict of the form {word: count, ...}
# We want a dict rather than a list of tuples, because of later lookups
return dict(most_common_words)
most_used_words_pos = sort_dict(pos_word_counter, 25)
most_used_words_neg = sort_dict(neg_word_counter, 25)
most_used_words_pos
# In[ ]:
# Need these 4 probabilities
# 1) Probability that a word appears in positive reviews
# 2) Probability that a word appears in negative reviews
# 3) Overall probability that any given review is positive
# 4) Overall probability that any given reviews is negative
# # Making a dictionary with probabilities for different words appearing in good and bad reviews
# # Example: {'bad': (0.0881, 0.3226)}
probability_appearing = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probability_appearing:
if word in pos_word_counter:
p_appearing_good = pos_word_counter[word]/total_positive
else:
p_appearing_good = 0.1
if word in neg_word_counter:
p_appearing_bad = neg_word_counter[word]/total_negative
else:
p_appearing_bad = 0.1
probability_appearing[word] = (p_appearing_good, p_appearing_bad)
p_pos = total_positive/len(train_set)
p_neg = total_negative/len(train_set)
print(p_pos)
print(p_neg)
# Finally we can compute P(Positive | Word)
def p_is_positive_given_word(word):
return (probability_appearing[word][0]*p_pos)/((probability_appearing[word][0]*p_pos + probability_appearing[word][1]*p_neg))
def p_is_negative_given_word(word):
return (probability_appearing[word][1]*p_neg)/((probability_appearing[word][1]*p_neg + probability_appearing[word][0]*p_pos))
p_is_positive_given_word('bad'), p_is_negative_given_word('bad')
# In[ ]:
probabilities = {}
for t in train_set:
text = t[0]
for word in text:
if word not in probabilities:
# use fresh names here so we do not clobber the global priors p_pos/p_neg
# that p_is_positive_given_word/p_is_negative_given_word rely on
prob_pos = p_is_positive_given_word(word)
prob_neg = p_is_negative_given_word(word)
if prob_pos == 0:
prob_pos = 0.1 # tweaking this value
if prob_pos == 1:
prob_pos = 0.98
if prob_neg == 0:
prob_neg = 0.1
if prob_neg == 1:
prob_neg = 0.98
probabilities[word] = (prob_pos, prob_neg)
# In[ ]:
# Filter out words that are not informative (probabilities between 0.40 and 0.60)
print(len(probabilities))
for word in list(probabilities):
probs = probabilities[word]
if 0.40 < probs[0] and probs[0] < 0.60 and 0.40 < probs[1] and probs[1] < 0.60:
del probabilities[word]
print(len(probabilities))
# In[ ]:
probabilities
# In[ ]:
# COMBINING INDIVIDUAL PROBABILITIES
# Determining whether a review is positive or negative based only on the presence of one word
# is error-prone; we must consider all the words (or the most interesting ones) in the review
from functools import reduce
def p_is_type(words):
words = list(filter(lambda x: x in probabilities, words)) # Filter out words not seen during the training phase
pos_probs = []
neg_probs = []
for word in words:
pos_probs.append(probabilities[word
|
random_line_split
|
|
tls-server.rs
|
BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid cert"))
.map(|mut certs| certs.drain(..).map(Certificate).collect())
}
fn load_keys(path: &Path, password: Option<&str>) -> io::Result<Vec<PrivateKey>> {
let expected_tag = match &password {
Some(_) => "ENCRYPTED PRIVATE KEY",
None => "PRIVATE KEY",
};
if expected_tag.eq("PRIVATE KEY") {
pkcs8_private_keys(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid key"))
.map(|mut keys| keys.drain(..).map(PrivateKey).collect())
} else {
let content = std::fs::read(path)?;
let mut iter = pem::parse_many(content)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err.to_string()))?
.into_iter()
.filter(|x| x.tag() == expected_tag)
.map(|x| x.contents().to_vec());
match iter.next() {
Some(key) => match password {
Some(password) => {
let encrypted =
pkcs8::EncryptedPrivateKeyInfo::from_der(&key).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let decrypted = encrypted.decrypt(password).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let key = decrypted.as_bytes().to_vec();
let key = rustls::PrivateKey(key);
let private_keys = vec![key];
io::Result::Ok(private_keys)
}
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
},
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
}
}
}
struct ExampleService {
input_registers: Arc<Mutex<HashMap<u16, u16>>>,
holding_registers: Arc<Mutex<HashMap<u16, u16>>>,
}
impl tokio_modbus::server::Service for ExampleService {
type Request = Request<'static>;
type Response = Response;
type Error = std::io::Error;
type Future = future::Ready<Result<Self::Response, Self::Error>>;
fn call(&self, req: Self::Request) -> Self::Future {
match req {
Request::ReadInputRegisters(addr, cnt) => {
match register_read(&self.input_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadInputRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::ReadHoldingRegisters(addr, cnt) => {
match register_read(&self.holding_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadHoldingRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteMultipleRegisters(addr, values) => {
match register_write(&mut self.holding_registers.lock().unwrap(), addr, &values) {
Ok(_) => future::ready(Ok(Response::WriteMultipleRegisters(
addr,
values.len() as u16,
))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteSingleRegister(addr, value) => {
match register_write(
&mut self.holding_registers.lock().unwrap(),
addr,
std::slice::from_ref(&value),
) {
Ok(_) => future::ready(Ok(Response::WriteSingleRegister(addr, value))),
Err(err) => future::ready(Err(err)),
}
}
_ => {
println!("SERVER: Exception::IllegalFunction - Unimplemented function code in request: {req:?}");
// TODO: We want to return a Modbus Exception response `IllegalFunction`. https://github.com/slowtec/tokio-modbus/issues/165
future::ready(Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
"Unimplemented function code in request".to_string(),
)))
}
}
}
}
impl ExampleService {
fn new() -> Self {
// Insert some test data as register values.
let mut input_registers = HashMap::new();
input_registers.insert(0, 1234);
input_registers.insert(1, 5678);
let mut holding_registers = HashMap::new();
holding_registers.insert(0, 10);
holding_registers.insert(1, 20);
holding_registers.insert(2, 30);
holding_registers.insert(3, 40);
Self {
input_registers: Arc::new(Mutex::new(input_registers)),
holding_registers: Arc::new(Mutex::new(holding_registers)),
}
}
}
/// Helper function implementing reading registers from a HashMap.
fn register_read(
registers: &HashMap<u16, u16>,
addr: u16,
cnt: u16,
) -> Result<Vec<u16>, std::io::Error> {
let mut response_values = vec![0; cnt.into()];
for i in 0..cnt {
let reg_addr = addr + i;
if let Some(r) = registers.get(&reg_addr) {
response_values[i as usize] = *r;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(response_values)
}
/// Write a holding register. Used by both the write single register
/// and write multiple registers requests.
fn register_write(
registers: &mut HashMap<u16, u16>,
addr: u16,
values: &[u16],
) -> Result<(), std::io::Error> {
for (i, value) in values.iter().enumerate() {
let reg_addr = addr + i as u16;
if let Some(r) = registers.get_mut(&reg_addr) {
*r = *value;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(())
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let socket_addr = "127.0.0.1:8802".parse()?;
tokio::select! {
_ = server_context(socket_addr) => unreachable!(),
_ = client_context(socket_addr) => println!("Exiting"),
}
Ok(())
}
async fn
|
(socket_addr: SocketAddr) -> anyhow::Result<()> {
println!("Starting up server on {socket_addr}");
let listener = TcpListener::bind(socket_addr).await?;
let server = Server::new(listener);
let on_connected = |stream, _socket_addr| async move {
let cert_path = Path::new("./pki/server.pem");
let key_path = Path::new("./pki/server.key");
let certs = load_certs(cert_path)?;
let mut keys = load_keys(key_path, None)?;
let config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let service = ExampleService::new();
let stream = acceptor.accept(stream).await;
match stream {
Ok(stream) => Ok(Some((service, stream))),
Err(_) => Ok(None),
}
};
let on_process_error = |err| {
eprintln!("{err}");
};
server.serve(&on_connected, on_process_error).await?;
Ok(())
}
async fn client_context(socket_addr: SocketAddr) {
use tokio_modbus::prelude::*;
tokio::join!(
async {
// Give the server some time for starting up
tokio::time::sleep(Duration::from_secs(1)).await;
println!("Connecting client...");
let mut root_cert_store = rustls::RootCertStore::empty();
let ca_path = Path::new("./pki/ca.pem");
let mut pem = BufReader::new(File::open(ca_path).unwrap());
let certs = rustls_pemfile::certs(&mut pem).unwrap();
let trust_anchors = certs.iter().map(|cert| {
let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap();
OwnedTrustAnchor::from_subject_spki
|
server_context
|
identifier_name
|
tls-server.rs
|
mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid cert"))
.map(|mut certs| certs.drain(..).map(Certificate).collect())
}
fn load_keys(path: &Path, password: Option<&str>) -> io::Result<Vec<PrivateKey>> {
let expected_tag = match &password {
Some(_) => "ENCRYPTED PRIVATE KEY",
None => "PRIVATE KEY",
};
if expected_tag.eq("PRIVATE KEY") {
pkcs8_private_keys(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid key"))
.map(|mut keys| keys.drain(..).map(PrivateKey).collect())
} else {
let content = std::fs::read(path)?;
let mut iter = pem::parse_many(content)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err.to_string()))?
.into_iter()
.filter(|x| x.tag() == expected_tag)
.map(|x| x.contents().to_vec());
match iter.next() {
Some(key) => match password {
Some(password) => {
let encrypted =
pkcs8::EncryptedPrivateKeyInfo::from_der(&key).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let decrypted = encrypted.decrypt(password).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let key = decrypted.as_bytes().to_vec();
let key = rustls::PrivateKey(key);
let private_keys = vec![key];
io::Result::Ok(private_keys)
}
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
},
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
}
}
}
struct ExampleService {
input_registers: Arc<Mutex<HashMap<u16, u16>>>,
holding_registers: Arc<Mutex<HashMap<u16, u16>>>,
}
impl tokio_modbus::server::Service for ExampleService {
type Request = Request<'static>;
type Response = Response;
type Error = std::io::Error;
type Future = future::Ready<Result<Self::Response, Self::Error>>;
|
Ok(values) => future::ready(Ok(Response::ReadInputRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::ReadHoldingRegisters(addr, cnt) => {
match register_read(&self.holding_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadHoldingRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteMultipleRegisters(addr, values) => {
match register_write(&mut self.holding_registers.lock().unwrap(), addr, &values) {
Ok(_) => future::ready(Ok(Response::WriteMultipleRegisters(
addr,
values.len() as u16,
))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteSingleRegister(addr, value) => {
match register_write(
&mut self.holding_registers.lock().unwrap(),
addr,
std::slice::from_ref(&value),
) {
Ok(_) => future::ready(Ok(Response::WriteSingleRegister(addr, value))),
Err(err) => future::ready(Err(err)),
}
}
_ => {
println!("SERVER: Exception::IllegalFunction - Unimplemented function code in request: {req:?}");
// TODO: We want to return a Modbus Exception response `IllegalFunction`. https://github.com/slowtec/tokio-modbus/issues/165
future::ready(Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
"Unimplemented function code in request".to_string(),
)))
}
}
}
}
impl ExampleService {
fn new() -> Self {
// Insert some test data as register values.
let mut input_registers = HashMap::new();
input_registers.insert(0, 1234);
input_registers.insert(1, 5678);
let mut holding_registers = HashMap::new();
holding_registers.insert(0, 10);
holding_registers.insert(1, 20);
holding_registers.insert(2, 30);
holding_registers.insert(3, 40);
Self {
input_registers: Arc::new(Mutex::new(input_registers)),
holding_registers: Arc::new(Mutex::new(holding_registers)),
}
}
}
/// Helper function implementing reading registers from a HashMap.
fn register_read(
registers: &HashMap<u16, u16>,
addr: u16,
cnt: u16,
) -> Result<Vec<u16>, std::io::Error> {
let mut response_values = vec![0; cnt.into()];
for i in 0..cnt {
let reg_addr = addr + i;
if let Some(r) = registers.get(&reg_addr) {
response_values[i as usize] = *r;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(response_values)
}
/// Write a holding register. Used by both the write single register
/// and write multiple registers requests.
fn register_write(
registers: &mut HashMap<u16, u16>,
addr: u16,
values: &[u16],
) -> Result<(), std::io::Error> {
for (i, value) in values.iter().enumerate() {
let reg_addr = addr + i as u16;
if let Some(r) = registers.get_mut(&reg_addr) {
*r = *value;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(())
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let socket_addr = "127.0.0.1:8802".parse()?;
tokio::select! {
_ = server_context(socket_addr) => unreachable!(),
_ = client_context(socket_addr) => println!("Exiting"),
}
Ok(())
}
async fn server_context(socket_addr: SocketAddr) -> anyhow::Result<()> {
println!("Starting up server on {socket_addr}");
let listener = TcpListener::bind(socket_addr).await?;
let server = Server::new(listener);
let on_connected = |stream, _socket_addr| async move {
let cert_path = Path::new("./pki/server.pem");
let key_path = Path::new("./pki/server.key");
let certs = load_certs(cert_path)?;
let mut keys = load_keys(key_path, None)?;
let config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let service = ExampleService::new();
let stream = acceptor.accept(stream).await;
match stream {
Ok(stream) => Ok(Some((service, stream))),
Err(_) => Ok(None),
}
};
let on_process_error = |err| {
eprintln!("{err}");
};
server.serve(&on_connected, on_process_error).await?;
Ok(())
}
async fn client_context(socket_addr: SocketAddr) {
use tokio_modbus::prelude::*;
tokio::join!(
async {
// Give the server some time for starting up
tokio::time::sleep(Duration::from_secs(1)).await;
println!("Connecting client...");
let mut root_cert_store = rustls::RootCertStore::empty();
let ca_path = Path::new("./pki/ca.pem");
let mut pem = BufReader::new(File::open(ca_path).unwrap());
let certs = rustls_pemfile::certs(&mut pem).unwrap();
let trust_anchors = certs.iter().map(|cert| {
let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap();
OwnedTrustAnchor::from_subject_spki_name
|
fn call(&self, req: Self::Request) -> Self::Future {
match req {
Request::ReadInputRegisters(addr, cnt) => {
match register_read(&self.input_registers.lock().unwrap(), addr, cnt) {
|
random_line_split
|
tls-server.rs
|
BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid cert"))
.map(|mut certs| certs.drain(..).map(Certificate).collect())
}
fn load_keys(path: &Path, password: Option<&str>) -> io::Result<Vec<PrivateKey>> {
let expected_tag = match &password {
Some(_) => "ENCRYPTED PRIVATE KEY",
None => "PRIVATE KEY",
};
if expected_tag.eq("PRIVATE KEY") {
pkcs8_private_keys(&mut BufReader::new(File::open(path)?))
.map_err(|_| io::Error::new(io::ErrorKind::InvalidInput, "invalid key"))
.map(|mut keys| keys.drain(..).map(PrivateKey).collect())
} else {
let content = std::fs::read(path)?;
let mut iter = pem::parse_many(content)
.map_err(|err| io::Error::new(io::ErrorKind::InvalidData, err.to_string()))?
.into_iter()
.filter(|x| x.tag() == expected_tag)
.map(|x| x.contents().to_vec());
match iter.next() {
Some(key) => match password {
Some(password) => {
let encrypted =
pkcs8::EncryptedPrivateKeyInfo::from_der(&key).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let decrypted = encrypted.decrypt(password).map_err(|err| {
io::Error::new(io::ErrorKind::InvalidData, err.to_string())
})?;
let key = decrypted.as_bytes().to_vec();
let key = rustls::PrivateKey(key);
let private_keys = vec![key];
io::Result::Ok(private_keys)
}
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
},
None => io::Result::Err(io::Error::new(io::ErrorKind::InvalidInput, "invalid key")),
}
}
}
struct ExampleService {
input_registers: Arc<Mutex<HashMap<u16, u16>>>,
holding_registers: Arc<Mutex<HashMap<u16, u16>>>,
}
impl tokio_modbus::server::Service for ExampleService {
type Request = Request<'static>;
type Response = Response;
type Error = std::io::Error;
type Future = future::Ready<Result<Self::Response, Self::Error>>;
fn call(&self, req: Self::Request) -> Self::Future
|
Err(err) => future::ready(Err(err)),
}
}
Request::WriteSingleRegister(addr, value) => {
match register_write(
&mut self.holding_registers.lock().unwrap(),
addr,
std::slice::from_ref(&value),
) {
Ok(_) => future::ready(Ok(Response::WriteSingleRegister(addr, value))),
Err(err) => future::ready(Err(err)),
}
}
_ => {
println!("SERVER: Exception::IllegalFunction - Unimplemented function code in request: {req:?}");
// TODO: We want to return a Modbus Exception response `IllegalFunction`. https://github.com/slowtec/tokio-modbus/issues/165
future::ready(Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
"Unimplemented function code in request".to_string(),
)))
}
}
}
}
impl ExampleService {
fn new() -> Self {
// Insert some test data as register values.
let mut input_registers = HashMap::new();
input_registers.insert(0, 1234);
input_registers.insert(1, 5678);
let mut holding_registers = HashMap::new();
holding_registers.insert(0, 10);
holding_registers.insert(1, 20);
holding_registers.insert(2, 30);
holding_registers.insert(3, 40);
Self {
input_registers: Arc::new(Mutex::new(input_registers)),
holding_registers: Arc::new(Mutex::new(holding_registers)),
}
}
}
/// Helper function implementing reading registers from a HashMap.
fn register_read(
registers: &HashMap<u16, u16>,
addr: u16,
cnt: u16,
) -> Result<Vec<u16>, std::io::Error> {
let mut response_values = vec![0; cnt.into()];
for i in 0..cnt {
let reg_addr = addr + i;
if let Some(r) = registers.get(&reg_addr) {
response_values[i as usize] = *r;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(response_values)
}
/// Write a holding register. Used by both the write single register
/// and write multiple registers requests.
fn register_write(
registers: &mut HashMap<u16, u16>,
addr: u16,
values: &[u16],
) -> Result<(), std::io::Error> {
for (i, value) in values.iter().enumerate() {
let reg_addr = addr + i as u16;
if let Some(r) = registers.get_mut(&reg_addr) {
*r = *value;
} else {
// TODO: Return a Modbus Exception response `IllegalDataAddress` https://github.com/slowtec/tokio-modbus/issues/165
println!("SERVER: Exception::IllegalDataAddress");
return Err(std::io::Error::new(
std::io::ErrorKind::AddrNotAvailable,
format!("no register at address {reg_addr}"),
));
}
}
Ok(())
}
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let socket_addr = "127.0.0.1:8802".parse()?;
tokio::select! {
_ = server_context(socket_addr) => unreachable!(),
_ = client_context(socket_addr) => println!("Exiting"),
}
Ok(())
}
async fn server_context(socket_addr: SocketAddr) -> anyhow::Result<()> {
println!("Starting up server on {socket_addr}");
let listener = TcpListener::bind(socket_addr).await?;
let server = Server::new(listener);
let on_connected = |stream, _socket_addr| async move {
let cert_path = Path::new("./pki/server.pem");
let key_path = Path::new("./pki/server.key");
let certs = load_certs(cert_path)?;
let mut keys = load_keys(key_path, None)?;
let config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, keys.remove(0))
.map_err(|err| io::Error::new(io::ErrorKind::InvalidInput, err))?;
let acceptor = TlsAcceptor::from(Arc::new(config));
let service = ExampleService::new();
let stream = acceptor.accept(stream).await;
match stream {
Ok(stream) => Ok(Some((service, stream))),
Err(_) => Ok(None),
}
};
let on_process_error = |err| {
eprintln!("{err}");
};
server.serve(&on_connected, on_process_error).await?;
Ok(())
}
async fn client_context(socket_addr: SocketAddr) {
use tokio_modbus::prelude::*;
tokio::join!(
async {
// Give the server some time for starting up
tokio::time::sleep(Duration::from_secs(1)).await;
println!("Connecting client...");
let mut root_cert_store = rustls::RootCertStore::empty();
let ca_path = Path::new("./pki/ca.pem");
let mut pem = BufReader::new(File::open(ca_path).unwrap());
let certs = rustls_pemfile::certs(&mut pem).unwrap();
let trust_anchors = certs.iter().map(|cert| {
let ta = TrustAnchor::try_from_cert_der(&cert[..]).unwrap();
OwnedTrustAnchor::from_subject_spki
|
{
match req {
Request::ReadInputRegisters(addr, cnt) => {
match register_read(&self.input_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadInputRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::ReadHoldingRegisters(addr, cnt) => {
match register_read(&self.holding_registers.lock().unwrap(), addr, cnt) {
Ok(values) => future::ready(Ok(Response::ReadHoldingRegisters(values))),
Err(err) => future::ready(Err(err)),
}
}
Request::WriteMultipleRegisters(addr, values) => {
match register_write(&mut self.holding_registers.lock().unwrap(), addr, &values) {
Ok(_) => future::ready(Ok(Response::WriteMultipleRegisters(
addr,
values.len() as u16,
))),
|
identifier_body
|
program.py
|
.compile(r"(\w*)\s*(?:```)(\w*)?([\s\S]*)(?:```$)")
@property
def session(self):
return self.bot.http._HTTPClient__session
|
async def _run_code(self, *, lang: str, code: str):
res = await self.session.post(
"https://emkc.org/api/v1/piston/execute",
json={"language": lang, "source": code})
return await res.json()
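# The command handlers below rely only on these response fields (see
# _send_result): 'output' and 'language' on success, and 'message' when the
# API reports an error. A sketch of a successful payload, inferred from that
# usage (other fields omitted):
#   {"language": "python", "output": "hello\n"}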
@commands.command()
async def run(self, ctx: commands.Context, *, codeblock: str):
"""
Run code and get results instantly
**Note**: You must use codeblocks around the code
Supported languages: awk, bash, brainfuck, c, cpp, crystal, csharp, d, dash, deno, elixir, emacs, go, haskell, java, jelly, julia, kotlin, lisp, lua, nasm, nasm64, nim, node, osabie, paradoc, perl, php, prolog, python2, python, ruby, rust, scala, swift, typescript, zig
"""
matches = self.regex.findall(codeblock)
if not matches:
return await ctx.reply(embed=discord.Embed(title="Uh-oh", description="Couldn't quite see your codeblock"))
lang = matches[0][0] or matches[0][1]
if not lang:
return await ctx.reply(embed=discord.Embed(title="Uh-oh",
description="Couldn't find the language hinted in the codeblock or before it"))
code = matches[0][2]
result = await self._run_code(lang=lang, code=code)
await self._send_result(ctx, result)
@commands.command()
async def runl(self, ctx: commands.Context, lang: str, *, code: str):
"""
Run a single line of code, **must** specify language as first argument
Supported languages: awk, bash, brainfuck, c, cpp, crystal, csharp, d, dash, deno, elixir, emacs, go, haskell, java, jelly, julia, kotlin, lisp, lua, nasm, nasm64, nim, node, osabie, paradoc, perl, php, prolog, python2, python, ruby, rust, scala, swift, typescript, zig
"""
result = await self._run_code(lang=lang, code=code)
await self._send_result(ctx, result)
async def _send_result(self, ctx: commands.Context, result: dict):
if "message" in result:
return await ctx.reply(embed=discord.Embed(title="Uh-oh", description=result["message"]))
output = result['output']
# if len(output) > 2000:
# url = await create_guest_paste_bin(self.session, output)
# return await ctx.reply("Your output was too long, so here's the pastebin link " + url)
embed = discord.Embed(
title=f"{result['language'][0].upper() + result['language'][1:]}")
newline = '\n'
rep = {"python3": "py", "python2": "py", 'node': 'js'}
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(rep.keys()))
converted_language = pattern.sub(lambda m: rep[re.escape(m.group(0))], result['language'])
limit = 1024 - (29 + len(converted_language))
output = f"```{converted_language}\n{output[:limit]}```{(len(output) > limit) * (newline + '**Output shortened**')}"
embed.add_field(name="Output", value=output or "**No output**")
try:
await ctx.reply(embed=embed)
except Exception:
await ctx.reply(output)
def parse_object_inv(self, stream, url):
# key: URL
# n.b.: key doesn't have `discord` or `discord.ext.commands` namespaces
result = {}
# first line is version info
inv_version = stream.readline().rstrip()
if inv_version != '# Sphinx inventory version 2':
raise RuntimeError('Invalid objects.inv file version.')
# next line is "# Project: <name>"
# then after that is "# Version: <version>"
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
# next line says if it's a zlib header
line = stream.readline()
if 'zlib' not in line:
raise RuntimeError('Invalid objects.inv file, not z-lib compatible.')
# This code mostly comes from the Sphinx repository.
entry_regex = re.compile(r'(?x)(.+?)\s+(\S*:\S*)\s+(-?\d+)\s+(\S+)\s+(.*)')
for line in stream.read_compressed_lines():
match = entry_regex.match(line.rstrip())
if not match:
continue
name, directive, prio, location, dispname = match.groups()
domain, _, subdirective = directive.partition(':')
if directive == 'py:module' and name in result:
# From the Sphinx Repository:
# due to a bug in 1.1 and below,
# two inventory entries are created
# for Python modules, and the first
# one is correct
continue
# Most documentation pages have a label
if directive == 'std:doc':
subdirective = 'label'
if location.endswith('$'):
location = location[:-1] + name
key = name if dispname == '-' else dispname
prefix = f'{subdirective}:' if domain == 'std' else ''
if projname == 'discord.py':
key = key.replace('discord.ext.commands.', '').replace('discord.', '')
result[f'{prefix}{key}'] = os.path.join(url, location)
return result
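# A hypothetical decompressed entry line and how entry_regex splits it
# (the values are illustrative only):
#   'Client.connect py:method 1 api.html#$ -'
#   -> name='Client.connect', directive='py:method', prio='1',
#      location='api.html#$' (the trailing '$' expands to the name above),
#      dispname='-' (meaning the display name equals the entry name)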
async def build_rtfm_lookup_table(self, page_types):
cache = {}
for key, page in page_types.items():
sub = cache[key] = {}
async with self.bot.session.get(page + '/objects.inv') as resp:
if resp.status != 200:
raise RuntimeError('Cannot build rtfm lookup table, try again later.')
stream = SphinxObjectFileReader(await resp.read())
cache[key] = self.parse_object_inv(stream, page)
self._rtfm_cache = cache
async def do_rtfm(self, ctx, key, obj):
page_types = {
'latest': 'https://discordpy.readthedocs.io/en/latest',
'latest-jp': 'https://discordpy.readthedocs.io/ja/latest',
'python': 'https://docs.python.org/3',
'python-jp': 'https://docs.python.org/ja/3',
}
if obj is None:
await ctx.send(page_types[key])
return
if not hasattr(self, '_rtfm_cache'):
await ctx.trigger_typing()
await self.build_rtfm_lookup_table(page_types)
obj = re.sub(r'^(?:discord\.(?:ext\.)?)?(?:commands\.)?(.+)', r'\1', obj)
if key.startswith('latest'):
# point the abc.Messageable types properly:
q = obj.lower()
for name in dir(discord.abc.Messageable):
if name[0] == '_':
continue
if q == name:
obj = f'abc.Messageable.{name}'
break
cache = list(self._rtfm_cache[key].items())
def transform(tup):
return tup[0]
matches = fuzzy.finder(obj, cache, key=lambda t: t[0], lazy=False)[:8]
e = discord.Embed(colour=discord.Colour.blurple())
if len(matches) == 0:
return await ctx.send('Could not find anything. Sorry.')
e.description = '\n'.join(f'[`{key}`]({url})' for key, url in matches)
await ctx.send(embed=e)
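# fuzzy.finder above is assumed to be the RoboDanny-style fuzzy matcher: it
# scores each (name, url) pair against obj via the key callable, and the [:8]
# slice keeps only the eight best hits for the embed.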
def transform_rtfm_language_key(self, ctx, prefix):
if ctx.guild is not None:
# Japanese category
if ctx.channel.category_id == 490287576670928914:
return prefix + '-jp'
# d.py unofficial JP
elif ctx.guild.id == 463986890190749698:
return prefix + '-jp'
return prefix
@commands.group(aliases=['rtfd'], invoke_without_command=True)
async def rtfm(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity.
Events, objects, and functions are all supported through
a cruddy fuzzy algorithm.
"""
key = self.transform_rtfm_language_key(ctx, 'latest')
await self.do_rtfm(ctx, key, obj)
@rtfm.command(name='jp')
async def rtfm_jp(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity (Japanese)."""
await self.do_rtfm(ctx, 'latest-jp', obj)
@rtfm.command(name='python', aliases=['py'])
async def rtfm_python(self, ctx, *, obj: str
|
random_line_split
|
|
program.py
|
emkc.org/api/v1/piston/execute",
json={"language": lang, "source": code})
return await res.json()
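# Response shape, inferred from how _send_result consumes it below: a
# successful run returns roughly {"language": "python3", "output": "..."},
# while a rejected request comes back as {"message": "..."}.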
@commands.command()
async def run(self, ctx: commands.Context, *, codeblock: str):
"""
Run code and get results instantly
**Note**: You must use codeblocks around the code
Supported languages: awk, bash, brainfuck, c, cpp, crystal, csharp, d, dash, deno, elixir, emacs, go, haskell, java, jelly, julia, kotlin, lisp, lua, nasm, nasm64, nim, node, osabie, paradoc, perl, php, prolog, python2, python, ruby, rust, scala, swift, typescript, zig
"""
matches = self.regex.findall(codeblock)
if not matches:
return await ctx.reply(embed=discord.Embed(title="Uh-oh", description="Couldn't quite see your codeblock"))
lang = matches[0][0] or matches[0][1]
if not lang:
return await ctx.reply(embed=discord.Embed(title="Uh-oh",
description="Couldn't find the language hinted in the codeblock or before it"))
code = matches[0][2]
result = await self._run_code(lang=lang, code=code)
await self._send_result(ctx, result)
@commands.command()
async def runl(self, ctx: commands.Context, lang: str, *, code: str):
"""
Run a single line of code, **must** specify language as first argument
Supported languages: awk, bash, brainfuck, c, cpp, crystal, csharp, d, dash, deno, elixir, emacs, go, haskell, java, jelly, julia, kotlin, lisp, lua, nasm, nasm64, nim, node, osabie, paradoc, perl, php, prolog, python2, python, ruby, rust, scala, swift, typescript, zig
"""
result = await self._run_code(lang=lang, code=code)
await self._send_result(ctx, result)
async def _send_result(self, ctx: commands.Context, result: dict):
if "message" in result:
return await ctx.reply(embed=discord.Embed(title="Uh-oh", description=result["message"]))
output = result['output']
# if len(output) > 2000:
# url = await create_guest_paste_bin(self.session, output)
# return await ctx.reply("Your output was too long, so here's the pastebin link " + url)
embed = discord.Embed(
title=f"{result['language'][0].upper() + result['language'][1:]}")
newline = '\n'
rep = {"python3": "py", "python2": "py", 'node': 'js'}
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(rep.keys()))
converted_language = pattern.sub(lambda m: rep[re.escape(m.group(0))], result['language'])
limit = 1024 - (29 + len(converted_language))
output = f"```{converted_language}\n{output[:limit]}```{(len(output) > limit) * (newline + '**Output shortened**')}"
embed.add_field(name="Output", value=output or "**No output**")
try:
await ctx.reply(embed=embed)
except discord.HTTPException:
await ctx.reply(output)
def parse_object_inv(self, stream, url):
# key: URL
# n.b.: key doesn't have `discord` or `discord.ext.commands` namespaces
result = {}
# first line is version info
inv_version = stream.readline().rstrip()
if inv_version != '# Sphinx inventory version 2':
raise RuntimeError('Invalid objects.inv file version.')
# next line is "# Project: <name>"
# then after that is "# Version: <version>"
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
# next line says if it's a zlib header
line = stream.readline()
if 'zlib' not in line:
raise RuntimeError('Invalid objects.inv file, not z-lib compatible.')
# This code mostly comes from the Sphinx repository.
entry_regex = re.compile(r'(?x)(.+?)\s+(\S*:\S*)\s+(-?\d+)\s+(\S+)\s+(.*)')
for line in stream.read_compressed_lines():
match = entry_regex.match(line.rstrip())
if not match:
continue
name, directive, prio, location, dispname = match.groups()
domain, _, subdirective = directive.partition(':')
if directive == 'py:module' and name in result:
# From the Sphinx Repository:
# due to a bug in 1.1 and below,
# two inventory entries are created
# for Python modules, and the first
# one is correct
continue
# Most documentation pages have a label
if directive == 'std:doc':
subdirective = 'label'
if location.endswith('$'):
location = location[:-1] + name
key = name if dispname == '-' else dispname
prefix = f'{subdirective}:' if domain == 'std' else ''
if projname == 'discord.py':
key = key.replace('discord.ext.commands.', '').replace('discord.', '')
result[f'{prefix}{key}'] = os.path.join(url, location)
return result
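# parse_object_inv above relies on a SphinxObjectFileReader offering readline()
# and read_compressed_lines(), which is not defined in this excerpt. A minimal
# sketch of such a reader (modelled on the widely copied RoboDanny helper;
# treat it as an assumption, not the original implementation):
import io
import zlib

class SphinxObjectFileReader:
    # Chunk size for streaming zlib decompression.
    BUFSIZE = 16 * 1024

    def __init__(self, buffer: bytes):
        self.stream = io.BytesIO(buffer)

    def readline(self) -> str:
        # Plain-text header lines at the top of objects.inv.
        return self.stream.readline().decode('utf-8')

    def _read_compressed_chunks(self):
        decompressor = zlib.decompressobj()
        while True:
            chunk = self.stream.read(self.BUFSIZE)
            if len(chunk) == 0:
                break
            yield decompressor.decompress(chunk)
        yield decompressor.flush()

    def read_compressed_lines(self):
        # Re-assemble decompressed chunks into newline-delimited entries.
        buf = b''
        for chunk in self._read_compressed_chunks():
            buf += chunk
            pos = buf.find(b'\n')
            while pos != -1:
                yield buf[:pos].decode('utf-8')
                buf = buf[pos + 1:]
                pos = buf.find(b'\n')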
async def build_rtfm_lookup_table(self, page_types):
cache = {}
for key, page in page_types.items():
async with self.bot.session.get(page + '/objects.inv') as resp:
if resp.status != 200:
raise RuntimeError('Cannot build rtfm lookup table, try again later.')
stream = SphinxObjectFileReader(await resp.read())
cache[key] = self.parse_object_inv(stream, page)
self._rtfm_cache = cache
async def do_rtfm(self, ctx, key, obj):
page_types = {
'latest': 'https://discordpy.readthedocs.io/en/latest',
'latest-jp': 'https://discordpy.readthedocs.io/ja/latest',
'python': 'https://docs.python.org/3',
'python-jp': 'https://docs.python.org/ja/3',
}
if obj is None:
await ctx.send(page_types[key])
return
if not hasattr(self, '_rtfm_cache'):
await ctx.trigger_typing()
await self.build_rtfm_lookup_table(page_types)
obj = re.sub(r'^(?:discord\.(?:ext\.)?)?(?:commands\.)?(.+)', r'\1', obj)
if key.startswith('latest'):
# point the abc.Messageable types properly:
q = obj.lower()
for name in dir(discord.abc.Messageable):
if name[0] == '_':
continue
if q == name:
obj = f'abc.Messageable.{name}'
break
cache = list(self._rtfm_cache[key].items())
matches = fuzzy.finder(obj, cache, key=lambda t: t[0], lazy=False)[:8]
e = discord.Embed(colour=discord.Colour.blurple())
if len(matches) == 0:
return await ctx.send('Could not find anything. Sorry.')
e.description = '\n'.join(f'[`{key}`]({url})' for key, url in matches)
await ctx.send(embed=e)
def transform_rtfm_language_key(self, ctx, prefix):
if ctx.guild is not None:
# Japanese category
if ctx.channel.category_id == 490287576670928914:
return prefix + '-jp'
# d.py unofficial JP
elif ctx.guild.id == 463986890190749698:
return prefix + '-jp'
return prefix
@commands.group(aliases=['rtfd'], invoke_without_command=True)
async def rtfm(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity.
Events, objects, and functions are all supported through
a cruddy fuzzy algorithm.
"""
key = self.transform_rtfm_language_key(ctx, 'latest')
await self.do_rtfm(ctx, key, obj)
@rtfm.command(name='jp')
async def rtfm_jp(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity (Japanese)."""
await self.do_rtfm(ctx, 'latest-jp', obj)
@rtfm.command(name='python', aliases=['py'])
async def rtfm_python(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a Python entity."""
key = self.transform_rtfm_language_key(ctx, 'python')
await self.do_rtfm(ctx, key, obj)
@rtfm.command(name='py-jp', aliases=['py-ja'])
async def rtfm_p
|
ython_jp(self,
|
identifier_name
|
|
program.py
|
= re.compile("|".join(rep.keys()))
converted_language = pattern.sub(lambda m: rep[re.escape(m.group(0))], result['language'])
limit = 1024 - (29 + len(converted_language))
output = f"```{converted_language}\n{output[:limit]}```{(len(output) > limit) * (newline + '**Output shortened**')}"
embed.add_field(name="Output", value=output or "**No output**")
try:
await ctx.reply(embed=embed)
except discord.HTTPException:
await ctx.reply(output)
def parse_object_inv(self, stream, url):
# key: URL
# n.b.: key doesn't have `discord` or `discord.ext.commands` namespaces
result = {}
# first line is version info
inv_version = stream.readline().rstrip()
if inv_version != '# Sphinx inventory version 2':
raise RuntimeError('Invalid objects.inv file version.')
# next line is "# Project: <name>"
# then after that is "# Version: <version>"
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
# next line says if it's a zlib header
line = stream.readline()
if 'zlib' not in line:
raise RuntimeError('Invalid objects.inv file, not z-lib compatible.')
# This code mostly comes from the Sphinx repository.
entry_regex = re.compile(r'(?x)(.+?)\s+(\S*:\S*)\s+(-?\d+)\s+(\S+)\s+(.*)')
for line in stream.read_compressed_lines():
match = entry_regex.match(line.rstrip())
if not match:
continue
name, directive, prio, location, dispname = match.groups()
domain, _, subdirective = directive.partition(':')
if directive == 'py:module' and name in result:
# From the Sphinx Repository:
# due to a bug in 1.1 and below,
# two inventory entries are created
# for Python modules, and the first
# one is correct
continue
# Most documentation pages have a label
if directive == 'std:doc':
subdirective = 'label'
if location.endswith('$'):
location = location[:-1] + name
key = name if dispname == '-' else dispname
prefix = f'{subdirective}:' if domain == 'std' else ''
if projname == 'discord.py':
key = key.replace('discord.ext.commands.', '').replace('discord.', '')
result[f'{prefix}{key}'] = os.path.join(url, location)
return result
async def build_rtfm_lookup_table(self, page_types):
cache = {}
for key, page in page_types.items():
async with self.bot.session.get(page + '/objects.inv') as resp:
if resp.status != 200:
raise RuntimeError('Cannot build rtfm lookup table, try again later.')
stream = SphinxObjectFileReader(await resp.read())
cache[key] = self.parse_object_inv(stream, page)
self._rtfm_cache = cache
async def do_rtfm(self, ctx, key, obj):
page_types = {
'latest': 'https://discordpy.readthedocs.io/en/latest',
'latest-jp': 'https://discordpy.readthedocs.io/ja/latest',
'python': 'https://docs.python.org/3',
'python-jp': 'https://docs.python.org/ja/3',
}
if obj is None:
await ctx.send(page_types[key])
return
if not hasattr(self, '_rtfm_cache'):
await ctx.trigger_typing()
await self.build_rtfm_lookup_table(page_types)
obj = re.sub(r'^(?:discord\.(?:ext\.)?)?(?:commands\.)?(.+)', r'\1', obj)
if key.startswith('latest'):
# point the abc.Messageable types properly:
q = obj.lower()
for name in dir(discord.abc.Messageable):
if name[0] == '_':
continue
if q == name:
obj = f'abc.Messageable.{name}'
break
cache = list(self._rtfm_cache[key].items())
matches = fuzzy.finder(obj, cache, key=lambda t: t[0], lazy=False)[:8]
e = discord.Embed(colour=discord.Colour.blurple())
if len(matches) == 0:
return await ctx.send('Could not find anything. Sorry.')
e.description = '\n'.join(f'[`{key}`]({url})' for key, url in matches)
await ctx.send(embed=e)
def transform_rtfm_language_key(self, ctx, prefix):
if ctx.guild is not None:
# Japanese category
if ctx.channel.category_id == 490287576670928914:
return prefix + '-jp'
# d.py unofficial JP
elif ctx.guild.id == 463986890190749698:
return prefix + '-jp'
return prefix
@commands.group(aliases=['rtfd'], invoke_without_command=True)
async def rtfm(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity.
Events, objects, and functions are all supported through
a cruddy fuzzy algorithm.
"""
key = self.transform_rtfm_language_key(ctx, 'latest')
await self.do_rtfm(ctx, key, obj)
@rtfm.command(name='jp')
async def rtfm_jp(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity (Japanese)."""
await self.do_rtfm(ctx, 'latest-jp', obj)
@rtfm.command(name='python', aliases=['py'])
async def rtfm_python(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a Python entity."""
key = self.transform_rtfm_language_key(ctx, 'python')
await self.do_rtfm(ctx, key, obj)
@rtfm.command(name='py-jp', aliases=['py-ja'])
async def rtfm_python_jp(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a Python entity (Japanese)."""
await self.do_rtfm(ctx, 'python-jp', obj)
async def _member_stats(self, ctx, member, total_uses):
e = discord.Embed(title='RTFM Stats')
e.set_author(name=str(member), icon_url=member.avatar_url)
@commands.command(name="evalute",description="コードを評価します")
async def evalute(self,ctx, n, *, code):
nm = n.lower()
a = code.replace("```", "")
if nm == "py":
b = (piston.execute(language="py", version="3.9", code=a))
c = str(b)
em = discord.Embed(title="Python Code Output!",
description=f'```py\nOutput:\n{c}```',
color=discord.Color.red())
elif nm == "java":
b = (piston.execute(language="java", version="15.0.2", code=a))
c = str(b)
em = discord.Embed(title="Java Code Output!",
description=f'```py\nOutput:\n{c}```',
color=discord.Color.red())
elif nm == "js":
b = (piston.execute(language="js", version="15.10.0", code=a))
c = str(b)
em = discord.Embed(title="JavaScript Code Output!",
description=f'```py\nOutput:\n{c}```',
color=discord.Color.red())
elif nm == "go":
b = (piston.execute(language="go", version="1.16.2", code=a))
c = str(b)
em = discord.Embed(title="Go Code Output!",
description=f'```py\nOutput:\n{c}```',
color=discord.Color.red())
elif nm == "ts":
b = (piston.execute(language="typescript", version="4.2.3", code=a))
c = str(b)
em = discord.Embed(title="TypeScript Code Output!",
description=f'```py\nOutput:\n{c}```',
color=discord.Color.red())
elif nm == "bf":
b = (piston.execute(language="brainfuck", version="2.7.3", code=a))
c = str(b)
em = discord.Embed(title="BrainFuck Code Output!",
description=f'```py\nOutput:\n{c}```',
color=discord.Color.red())
elif nm == "php":
b = (piston.execute(language="php",version="8.0",code=a))
c = str(b)
em = discord.Embed(title="PHP code出力",
description=f'```py\nOutput:\n{c}```',
color=discord.Color.red())
else:
em = discord.Embed(title="その言語は対応していません")
await ctx.send(embed=em)
def setup(client):
client.add_cog(ProgrammingCog(client))
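# A hypothetical table-driven rewrite of the evalute command above: one mapping
# replaces the if/elif chain. The language/version pairs are copied verbatim
# from the original branches, and piston.execute is the same helper the command
# already calls; everything else is a sketch, not the original code.
EVAL_LANGS = {
    "py": ("py", "3.9", "Python"),
    "java": ("java", "15.0.2", "Java"),
    "js": ("js", "15.10.0", "JavaScript"),
    "go": ("go", "1.16.2", "Go"),
    "ts": ("typescript", "4.2.3", "TypeScript"),
    "bf": ("brainfuck", "2.7.3", "BrainFuck"),
    "php": ("php", "8.0", "PHP"),
}

@commands.command(name="evalute", description="Evaluates code")
async def evalute(self, ctx, n, *, code):
    entry = EVAL_LANGS.get(n.lower())
    if entry is None:
        return await ctx.send(embed=discord.Embed(title="That language is not supported"))
    language, version, label = entry
    result = str(piston.execute(language=language, version=version, code=code.replace("```", "")))
    em = discord.Embed(title=f"{label} Code Output!",
                       description=f'```py\nOutput:\n{result}```',
                       color=discord.Color.red())
    await ctx.send(embed=em)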
|
identifier_body
|
||
program.py
|
(r"(\w*)\s*(?:```)(\w*)?([\s\S]*)(?:```$)")
@property
def session(self):
return self.bot.http._HTTPClient__session
async def _run_code(self, *, lang: str, code: str):
res = await self.session.post(
"https://emkc.org/api/v1/piston/execute",
json={"language": lang, "source": code})
return await res.json()
@commands.command()
async def run(self, ctx: commands.Context, *, codeblock: str):
"""
Run code and get results instantly
**Note**: You must use codeblocks around the code
Supported languages: awk, bash, brainfuck, c, cpp, crystal, csharp, d, dash, deno, elixir, emacs, go, haskell, java, jelly, julia, kotlin, lisp, lua, nasm, nasm64, nim, node, osabie, paradoc, perl, php, prolog, python2, python, ruby, rust, scala, swift, typescript, zig
"""
matches = self.regex.findall(codeblock)
if not matches:
return await ctx.reply(embed=discord.Embed(title="Uh-oh", description="Couldn't quite see your codeblock"))
lang = matches[0][0] or matches[0][1]
if not lang:
return await ctx.reply(embed=discord.Embed(title="Uh-oh",
description="Couldn't find the language hinted in the codeblock or before it"))
code = matches[0][2]
result = await self._run_code(lang=lang, code=code)
await self._send_result(ctx, result)
@commands.command()
async def runl(self, ctx: commands.Context, lang: str, *, code: str):
"""
Run a single line of code, **must** specify language as first argument
Supported languages: awk, bash, brainfuck, c, cpp, crystal, csharp, d, dash, deno, elixir, emacs, go, haskell, java, jelly, julia, kotlin, lisp, lua, nasm, nasm64, nim, node, osabie, paradoc, perl, php, prolog, python2, python, ruby, rust, scala, swift, typescript, zig
"""
result = await self._run_code(lang=lang, code=code)
await self._send_result(ctx, result)
async def _send_result(self, ctx: commands.Context, result: dict):
if "message" in result:
return await ctx.reply(embed=discord.Embed(title="Uh-oh", description=result["message"]))
output = result['output']
# if len(output) > 2000:
# url = await create_guest_paste_bin(self.session, output)
# return await ctx.reply("Your output was too long, so here's the pastebin link " + url)
embed = discord.Embed(
title=f"{result['language'][0].upper() + result['language'][1:]}")
newline = '\n'
rep = {"python3": "py", "python2": "py", 'node': 'js'}
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(rep.keys()))
converted_language = pattern.sub(lambda m: rep[re.escape(m.group(0))], result['language'])
limit = 1024 - (29 + len(converted_language))
output = f"```{converted_language}\n{output[:limit]}```{(len(output) > limit) * (newline + '**Output shortened**')}"
embed.add_field(name="Output", value=output or "**No output**")
try:
await ctx.reply(embed=embed)
except discord.HTTPException:
await ctx.reply(output)
def parse_object_inv(self, stream, url):
# key: URL
# n.b.: key doesn't have `discord` or `discord.ext.commands` namespaces
result = {}
# first line is version info
inv_version = stream.readline().rstrip()
if inv_version != '# Sphinx inventory version 2':
raise RuntimeError('Invalid objects.inv file version.')
# next line is "# Project: <name>"
# then after that is "# Version: <version>"
projname = stream.readline().rstrip()[11:]
version = stream.readline().rstrip()[11:]
# next line says if it's a zlib header
line = stream.readline()
if 'zlib' not in line:
|
# This code mostly comes from the Sphinx repository.
entry_regex = re.compile(r'(?x)(.+?)\s+(\S*:\S*)\s+(-?\d+)\s+(\S+)\s+(.*)')
for line in stream.read_compressed_lines():
match = entry_regex.match(line.rstrip())
if not match:
continue
name, directive, prio, location, dispname = match.groups()
domain, _, subdirective = directive.partition(':')
if directive == 'py:module' and name in result:
# From the Sphinx Repository:
# due to a bug in 1.1 and below,
# two inventory entries are created
# for Python modules, and the first
# one is correct
continue
# Most documentation pages have a label
if directive == 'std:doc':
subdirective = 'label'
if location.endswith('$'):
location = location[:-1] + name
key = name if dispname == '-' else dispname
prefix = f'{subdirective}:' if domain == 'std' else ''
if projname == 'discord.py':
key = key.replace('discord.ext.commands.', '').replace('discord.', '')
result[f'{prefix}{key}'] = os.path.join(url, location)
return result
async def build_rtfm_lookup_table(self, page_types):
cache = {}
for key, page in page_types.items():
async with self.bot.session.get(page + '/objects.inv') as resp:
if resp.status != 200:
raise RuntimeError('Cannot build rtfm lookup table, try again later.')
stream = SphinxObjectFileReader(await resp.read())
cache[key] = self.parse_object_inv(stream, page)
self._rtfm_cache = cache
async def do_rtfm(self, ctx, key, obj):
page_types = {
'latest': 'https://discordpy.readthedocs.io/en/latest',
'latest-jp': 'https://discordpy.readthedocs.io/ja/latest',
'python': 'https://docs.python.org/3',
'python-jp': 'https://docs.python.org/ja/3',
}
if obj is None:
await ctx.send(page_types[key])
return
if not hasattr(self, '_rtfm_cache'):
await ctx.trigger_typing()
await self.build_rtfm_lookup_table(page_types)
obj = re.sub(r'^(?:discord\.(?:ext\.)?)?(?:commands\.)?(.+)', r'\1', obj)
if key.startswith('latest'):
# point the abc.Messageable types properly:
q = obj.lower()
for name in dir(discord.abc.Messageable):
if name[0] == '_':
continue
if q == name:
obj = f'abc.Messageable.{name}'
break
cache = list(self._rtfm_cache[key].items())
matches = fuzzy.finder(obj, cache, key=lambda t: t[0], lazy=False)[:8]
e = discord.Embed(colour=discord.Colour.blurple())
if len(matches) == 0:
return await ctx.send('Could not find anything. Sorry.')
e.description = '\n'.join(f'[`{key}`]({url})' for key, url in matches)
await ctx.send(embed=e)
def transform_rtfm_language_key(self, ctx, prefix):
if ctx.guild is not None:
# Japanese category
if ctx.channel.category_id == 490287576670928914:
return prefix + '-jp'
# d.py unofficial JP
elif ctx.guild.id == 463986890190749698:
return prefix + '-jp'
return prefix
@commands.group(aliases=['rtfd'], invoke_without_command=True)
async def rtfm(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity.
Events, objects, and functions are all supported through
a cruddy fuzzy algorithm.
"""
key = self.transform_rtfm_language_key(ctx, 'latest')
await self.do_rtfm(ctx, key, obj)
@rtfm.command(name='jp')
async def rtfm_jp(self, ctx, *, obj: str = None):
"""Gives you a documentation link for a discord.py entity (Japanese)."""
await self.do_rtfm(ctx, 'latest-jp', obj)
@rtfm.command(name='python', aliases=['py'])
async def rtfm_python(self, ctx, *, obj:
|
raise RuntimeError('Invalid objects.inv file, not z-lib compatible.')
|
conditional_block
|
packer.rs
|
let (slate_bin, encrypted) = pack.to_binary(slate_version, secret, use_test_rng)?;
SlatepackArmor::encode(&slate_bin, encrypted)
}
/// return slatepack
pub fn decrypt_slatepack(data: &[u8], dec_key: &DalekSecretKey) -> Result<Self, Error> {
let (slate_bytes, encrypted) = SlatepackArmor::decode(data)?;
let slatepack = Slatepack::from_binary(&slate_bytes, encrypted, dec_key)?;
let Slatepack {
sender,
recipient,
content,
slate,
} = slatepack;
Ok(Self {
sender,
recipient,
content,
slate,
})
}
/// Get the content (transaction purpose) stored in this slatepack
pub fn get_content(&self) -> SlatePurpose {
self.content.clone()
}
/// Get Sender info. It is needed to send the response back
pub fn get_sender(&self) -> Option<DalekPublicKey>
|
/// Get Recipient info. It is needed to send the response back
pub fn get_recipient(&self) -> Option<DalekPublicKey> {
self.recipient.clone()
}
/// Convert this slatepack back into the resulting slate. Since the slatepack contains only
/// the change set, the original slate is required to merge with in order to recover the data.
pub fn to_result_slate(self) -> Slate {
self.slate
}
}
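// A hypothetical round-trip over the API above (the encrypting entry point is
// cut off in this excerpt, and the receiving type name is an assumption):
//
//     let armored = encrypt_slatepack(&slate, ...)?;        // armor + encrypt
//     let pack = Self::decrypt_slatepack(&armored, &dalek_sk)?;
//     let _purpose = pack.get_content();
//     let restored = pack.to_result_slate();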
#[test]
fn slatepack_io_test() {
use crate::grin_core::core::KernelFeatures;
use crate::grin_core::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use crate::grin_core::global;
use crate::grin_keychain::BlindingFactor;
use crate::grin_keychain::ExtKeychain;
use crate::grin_util as util;
use crate::grin_util::secp::pedersen::{Commitment, RangeProof};
use crate::grin_util::secp::Signature;
use crate::grin_util::secp::{PublicKey, Secp256k1, SecretKey};
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate::{PaymentInfo, VersionCompatInfo};
use crate::ParticipantData;
use uuid::Uuid;
use x25519_dalek::PublicKey as xDalekPublicKey;
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let bytes_16: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let bytes_32: [u8; 32] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32,
];
let bytes_32_2: [u8; 32] = [
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33,
];
let bytes_33: [u8; 33] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33,
];
let bytes_64: [u8; 64] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
];
let sk = SecretKey::from_slice(&bytes_32).unwrap();
let secp = Secp256k1::new();
let dalek_sk = DalekSecretKey::from_bytes(&bytes_32).unwrap();
let dalek_pk = DalekPublicKey::from(&dalek_sk);
let dalek_sk2 = DalekSecretKey::from_bytes(&bytes_32_2).unwrap();
let dalek_pk2 = DalekPublicKey::from(&dalek_sk2);
// Let's test out the Dalek to xDalek algebra.
let dalek_xpk = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk).unwrap();
let dalek_xpk2 = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk2).unwrap();
let dalek_xsk = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk);
let dalek_xsk2 = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk2);
let builded_xpk = xDalekPublicKey::from(&dalek_xsk);
let builded_xpk2 = xDalekPublicKey::from(&dalek_xsk2);
assert_eq!(dalek_xpk.as_bytes(), builded_xpk.as_bytes());
assert_eq!(dalek_xpk2.as_bytes(), builded_xpk2.as_bytes());
// Check that Diffie-Hellman key agreement works...
let shared_secret1 = dalek_xsk.diffie_hellman(&dalek_xpk2);
let shared_secret2 = dalek_xsk2.diffie_hellman(&dalek_xpk);
assert_eq!(shared_secret1.as_bytes(), shared_secret2.as_bytes());
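// Both directions must agree: X25519 computes sk1*(sk2*G) on one side and
// sk2*(sk1*G) on the other, and scalar multiplication commutes.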
// Note: the slate data is fake, just some random numbers; it will not pass validation of any kind
let mut slate_enc = Slate {
compact_slate: true, // Slatepack works only for compact models.
num_participants: 2,
id: Uuid::from_bytes(bytes_16),
tx: Transaction::empty()
.with_offset(BlindingFactor::from_slice(&bytes_32) )
.with_input( Input::new( OutputFeatures::Plain, Commitment(bytes_33)) )
.with_output( Output::new(OutputFeatures::Plain, Commitment(bytes_33), RangeProof::zero()))
.with_kernel( TxKernel::with_features(KernelFeatures::Plain { fee: 321 }) ),
offset: BlindingFactor::from_slice(&bytes_32),
amount: 30000000000000000,
fee: 321,
height: 67,
lock_height: 0,
ttl_cutoff_height: Some(54),
participant_data: vec![
ParticipantData {
id: 0,
public_blind_excess: PublicKey::from_secret_key( &secp, &sk).unwrap(),
public_nonce: PublicKey::from_secret_key( &secp, &sk).unwrap(),
part_sig: None,
message: Some("message 1 to send".to_string()),
message_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b643
|
{
self.sender.clone()
}
|
identifier_body
|
packer.rs
|
let (slate_bin, encrypted) = pack.to_binary(slate_version, secret, use_test_rng)?;
SlatepackArmor::encode(&slate_bin, encrypted)
}
/// return slatepack
pub fn decrypt_slatepack(data: &[u8], dec_key: &DalekSecretKey) -> Result<Self, Error> {
let (slate_bytes, encrypted) = SlatepackArmor::decode(data)?;
let slatepack = Slatepack::from_binary(&slate_bytes, encrypted, dec_key)?;
let Slatepack {
sender,
recipient,
content,
slate,
} = slatepack;
Ok(Self {
sender,
recipient,
content,
slate,
})
}
/// Get the content (transaction purpose) stored in this slatepack
pub fn
|
(&self) -> SlatePurpose {
self.content.clone()
}
/// Get Sender info. It is needed to send the response back
pub fn get_sender(&self) -> Option<DalekPublicKey> {
self.sender.clone()
}
/// Get Recipient info. It is needed to send the response back
pub fn get_recipient(&self) -> Option<DalekPublicKey> {
self.recipient.clone()
}
/// Convert this slatepack back into the resulting slate. Since the slatepack contains only
/// the change set, the original slate is required to merge with in order to recover the data.
pub fn to_result_slate(self) -> Slate {
self.slate
}
}
#[test]
fn slatepack_io_test() {
use crate::grin_core::core::KernelFeatures;
use crate::grin_core::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use crate::grin_core::global;
use crate::grin_keychain::BlindingFactor;
use crate::grin_keychain::ExtKeychain;
use crate::grin_util as util;
use crate::grin_util::secp::pedersen::{Commitment, RangeProof};
use crate::grin_util::secp::Signature;
use crate::grin_util::secp::{PublicKey, Secp256k1, SecretKey};
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate::{PaymentInfo, VersionCompatInfo};
use crate::ParticipantData;
use uuid::Uuid;
use x25519_dalek::PublicKey as xDalekPublicKey;
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let bytes_16: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let bytes_32: [u8; 32] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32,
];
let bytes_32_2: [u8; 32] = [
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33,
];
let bytes_33: [u8; 33] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33,
];
let bytes_64: [u8; 64] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
];
let sk = SecretKey::from_slice(&bytes_32).unwrap();
let secp = Secp256k1::new();
let dalek_sk = DalekSecretKey::from_bytes(&bytes_32).unwrap();
let dalek_pk = DalekPublicKey::from(&dalek_sk);
let dalek_sk2 = DalekSecretKey::from_bytes(&bytes_32_2).unwrap();
let dalek_pk2 = DalekPublicKey::from(&dalek_sk2);
// Let's test out the Dalek to xDalek algebra.
let dalek_xpk = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk).unwrap();
let dalek_xpk2 = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk2).unwrap();
let dalek_xsk = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk);
let dalek_xsk2 = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk2);
let builded_xpk = xDalekPublicKey::from(&dalek_xsk);
let builded_xpk2 = xDalekPublicKey::from(&dalek_xsk2);
assert_eq!(dalek_xpk.as_bytes(), builded_xpk.as_bytes());
assert_eq!(dalek_xpk2.as_bytes(), builded_xpk2.as_bytes());
// Check that Diffie-Hellman key agreement works...
let shared_secret1 = dalek_xsk.diffie_hellman(&dalek_xpk2);
let shared_secret2 = dalek_xsk2.diffie_hellman(&dalek_xpk);
assert_eq!(shared_secret1.as_bytes(), shared_secret2.as_bytes());
// Note: the slate data is fake, just some random numbers; it will not pass validation of any kind
let mut slate_enc = Slate {
compact_slate: true, // Slatepack works only for compact models.
num_participants: 2,
id: Uuid::from_bytes(bytes_16),
tx: Transaction::empty()
.with_offset(BlindingFactor::from_slice(&bytes_32) )
.with_input( Input::new( OutputFeatures::Plain, Commitment(bytes_33)) )
.with_output( Output::new(OutputFeatures::Plain, Commitment(bytes_33), RangeProof::zero()))
.with_kernel( TxKernel::with_features(KernelFeatures::Plain { fee: 321 }) ),
offset: BlindingFactor::from_slice(&bytes_32),
amount: 30000000000000000,
fee: 321,
height: 67,
lock_height: 0,
ttl_cutoff_height: Some(54),
participant_data: vec![
ParticipantData {
id: 0,
public_blind_excess: PublicKey::from_secret_key( &secp, &sk).unwrap(),
public_nonce: PublicKey::from_secret_key( &secp, &sk).unwrap(),
part_sig: None,
message: Some("message 1 to send".to_string()),
message_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b643
|
get_content
|
identifier_name
|
packer.rs
|
let (slate_bin, encrypted) = pack.to_binary(slate_version, secret, use_test_rng)?;
SlatepackArmor::encode(&slate_bin, encrypted)
}
/// return slatepack
pub fn decrypt_slatepack(data: &[u8], dec_key: &DalekSecretKey) -> Result<Self, Error> {
let (slate_bytes, encrypted) = SlatepackArmor::decode(data)?;
let slatepack = Slatepack::from_binary(&slate_bytes, encrypted, dec_key)?;
let Slatepack {
sender,
recipient,
content,
slate,
} = slatepack;
Ok(Self {
sender,
recipient,
content,
slate,
})
}
/// Get the content (transaction purpose) stored in this slatepack
pub fn get_content(&self) -> SlatePurpose {
self.content.clone()
}
/// Get Sender info. It is needed to send the response back
pub fn get_sender(&self) -> Option<DalekPublicKey> {
self.sender.clone()
}
/// Get Recipient info. It is needed to send the response back
pub fn get_recipient(&self) -> Option<DalekPublicKey> {
self.recipient.clone()
}
/// Convert this slatepack back into the resulting slate. Since the slatepack contains only
/// the change set, the original slate is required to merge with in order to recover the data.
pub fn to_result_slate(self) -> Slate {
self.slate
}
}
#[test]
fn slatepack_io_test() {
use crate::grin_core::core::KernelFeatures;
use crate::grin_core::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use crate::grin_core::global;
use crate::grin_keychain::BlindingFactor;
use crate::grin_keychain::ExtKeychain;
use crate::grin_util as util;
use crate::grin_util::secp::pedersen::{Commitment, RangeProof};
use crate::grin_util::secp::Signature;
use crate::grin_util::secp::{PublicKey, Secp256k1, SecretKey};
use crate::proof::proofaddress;
use crate::proof::proofaddress::ProvableAddress;
use crate::slate::{PaymentInfo, VersionCompatInfo};
use crate::ParticipantData;
use uuid::Uuid;
use x25519_dalek::PublicKey as xDalekPublicKey;
global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
let bytes_16: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
let bytes_32: [u8; 32] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32,
];
let bytes_32_2: [u8; 32] = [
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33,
];
let bytes_33: [u8; 33] = [
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33,
];
let bytes_64: [u8; 64] = [
|
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
];
let sk = SecretKey::from_slice(&bytes_32).unwrap();
let secp = Secp256k1::new();
let dalek_sk = DalekSecretKey::from_bytes(&bytes_32).unwrap();
let dalek_pk = DalekPublicKey::from(&dalek_sk);
let dalek_sk2 = DalekSecretKey::from_bytes(&bytes_32_2).unwrap();
let dalek_pk2 = DalekPublicKey::from(&dalek_sk2);
// Let's test out the Dalek to xDalek algebra.
let dalek_xpk = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk).unwrap();
let dalek_xpk2 = proofaddress::tor_pub_2_slatepack_pub(&dalek_pk2).unwrap();
let dalek_xsk = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk);
let dalek_xsk2 = proofaddress::tor_secret_2_slatepack_secret(&dalek_sk2);
let builded_xpk = xDalekPublicKey::from(&dalek_xsk);
let builded_xpk2 = xDalekPublicKey::from(&dalek_xsk2);
assert_eq!(dalek_xpk.as_bytes(), builded_xpk.as_bytes());
assert_eq!(dalek_xpk2.as_bytes(), builded_xpk2.as_bytes());
// Check that Diffie-Hellman key agreement works...
let shared_secret1 = dalek_xsk.diffie_hellman(&dalek_xpk2);
let shared_secret2 = dalek_xsk2.diffie_hellman(&dalek_xpk);
assert_eq!(shared_secret1.as_bytes(), shared_secret2.as_bytes());
// Note: the slate data is fake, just some random numbers; it will not pass validation of any kind
let mut slate_enc = Slate {
compact_slate: true, // Slatepack works only for compact models.
num_participants: 2,
id: Uuid::from_bytes(bytes_16),
tx: Transaction::empty()
.with_offset(BlindingFactor::from_slice(&bytes_32) )
.with_input( Input::new( OutputFeatures::Plain, Commitment(bytes_33)) )
.with_output( Output::new(OutputFeatures::Plain, Commitment(bytes_33), RangeProof::zero()))
.with_kernel( TxKernel::with_features(KernelFeatures::Plain { fee: 321 }) ),
offset: BlindingFactor::from_slice(&bytes_32),
amount: 30000000000000000,
fee: 321,
height: 67,
lock_height: 0,
ttl_cutoff_height: Some(54),
participant_data: vec![
ParticipantData {
id: 0,
public_blind_excess: PublicKey::from_secret_key( &secp, &sk).unwrap(),
public_nonce: PublicKey::from_secret_key( &secp, &sk).unwrap(),
part_sig: None,
message: Some("message 1 to send".to_string()),
message_sig: Some(Signature::from_compact(&util::from_hex("89cc3c1480fea655f29d300fcf68d0cfbf53f96a1d6b1219486b6438
|
random_line_split
|
|
json.go
|
.SubmitRequirements[i], err = r.ToProto(); err != nil {
return nil, err
}
}
}
if ci.Reviewers != nil {
ret.Reviewers = &gerritpb.ReviewerStatusMap{}
if accs, exist := ci.Reviewers["REVIEWER"]; exist {
ret.Reviewers.Reviewers = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Reviewers[i] = acc.ToProto()
}
}
if accs, exist := ci.Reviewers["CC"]; exist {
ret.Reviewers.Ccs = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Ccs[i] = acc.ToProto()
}
}
if accs, exist := ci.Reviewers["REMOVED"]; exist {
ret.Reviewers.Removed = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Removed[i] = acc.ToProto()
}
}
}
return ret, nil
}
type labelInfo struct {
Optional bool `json:"optional"`
Approved *accountInfo `json:"approved"`
Rejected *accountInfo `json:"rejected"`
Recommended *accountInfo `json:"recommended"`
Disliked *accountInfo `json:"disliked"`
Blocking bool `json:"blocking"`
Value int32 `json:"value"`
DefaultValue int32 `json:"default_value"`
All []*approvalInfo `json:"all"`
Values map[string]string `json:"values"`
}
func (li *labelInfo) ToProto() *gerritpb.LabelInfo {
ret := &gerritpb.LabelInfo{
Optional: li.Optional,
Approved: li.Approved.ToProto(),
Rejected: li.Rejected.ToProto(),
Recommended: li.Recommended.ToProto(),
Disliked: li.Disliked.ToProto(),
Blocking: li.Blocking,
Value: li.Value,
DefaultValue: li.DefaultValue,
}
if len(li.All) > 0 {
ret.All = make([]*gerritpb.ApprovalInfo, len(li.All))
for i, a := range li.All {
ret.All[i] = a.ToProto()
}
}
if li.Values != nil {
ret.Values = make(map[int32]string, len(li.Values))
for value, description := range li.Values {
i, err := strconv.ParseInt(strings.TrimSpace(value), 10, 32)
// Error is silently ignored for consistency with other parts of code.
if err == nil {
ret.Values[int32(i)] = description
}
}
}
return ret
}
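// Note: Gerrit serializes the label "values" map with string keys such as
// "-1", " 0" and "+1"; TrimSpace plus ParseInt (which accepts a leading '+')
// normalizes them to the int32 keys used above.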
type approvalInfo struct {
accountInfo
Value int32 `json:"value"`
PermittedVotingRange *gerritpb.VotingRangeInfo `json:"permitted_voting_range"`
Date Timestamp `json:"date"`
Tag string `json:"tag"`
PostSubmit bool `json:"post_submit"`
}
func (ai *approvalInfo) ToProto() *gerritpb.ApprovalInfo {
ret := &gerritpb.ApprovalInfo{
User: ai.accountInfo.ToProto(),
Value: ai.Value,
PermittedVotingRange: ai.PermittedVotingRange,
Date: timestamppb.New(ai.Date.Time),
Tag: ai.Tag,
PostSubmit: ai.PostSubmit,
}
return ret
}
type changeMessageInfo struct {
ID string `json:"id"`
Author *accountInfo `json:"author"`
RealAuthor *accountInfo `json:"real_author"`
Date Timestamp `json:"date"`
Message string `json:"message"`
Tag string `json:"tag"`
}
func (cmi *changeMessageInfo)
|
() *gerritpb.ChangeMessageInfo {
if cmi == nil {
return nil
}
return &gerritpb.ChangeMessageInfo{
Id: cmi.ID,
Author: cmi.Author.ToProto(),
RealAuthor: cmi.RealAuthor.ToProto(),
Date: timestamppb.New(cmi.Date.Time),
Message: cmi.Message,
Tag: cmi.Tag,
}
}
type requirement struct {
Status string `json:"status"`
FallbackText string `json:"fallback_text"`
Type string `json:"type"`
}
func (r *requirement) ToProto() (*gerritpb.Requirement, error) {
stringVal := "REQUIREMENT_STATUS_" + r.Status
numVal, found := gerritpb.Requirement_Status_value[stringVal]
if !found {
return nil, errors.Reason("no Status enum value for %q", r.Status).Err()
}
return &gerritpb.Requirement{
Status: gerritpb.Requirement_Status(numVal),
FallbackText: r.FallbackText,
Type: r.Type,
}, nil
}
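// For example, a requirement whose Status is "OK" resolves through
// gerritpb.Requirement_Status_value["REQUIREMENT_STATUS_OK"].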
type fileInfo struct {
LinesInserted int32 `json:"lines_inserted"`
LinesDeleted int32 `json:"lines_deleted"`
SizeDelta int64 `json:"size_delta"`
Size int64 `json:"size"`
}
func (fi *fileInfo) ToProto() *gerritpb.FileInfo {
return &gerritpb.FileInfo{
LinesInserted: fi.LinesInserted,
LinesDeleted: fi.LinesDeleted,
SizeDelta: fi.SizeDelta,
Size: fi.Size,
}
}
type revisionInfo struct {
Kind string `json:"kind"`
Number int `json:"_number"`
Uploader *accountInfo `json:"uploader"`
Ref string `json:"ref"`
Created Timestamp `json:"created"`
Description string `json:"description"`
Files map[string]*fileInfo `json:"files"`
Commit *commitInfo `json:"commit"`
}
func (ri *revisionInfo) ToProto() *gerritpb.RevisionInfo {
ret := &gerritpb.RevisionInfo{
Number: int32(ri.Number),
Uploader: ri.Uploader.ToProto(),
Ref: ri.Ref,
Created: timestamppb.New(ri.Created.Time),
Description: ri.Description,
}
if v, ok := gerritpb.RevisionInfo_Kind_value[ri.Kind]; ok {
ret.Kind = gerritpb.RevisionInfo_Kind(v)
}
if ri.Files != nil {
ret.Files = make(map[string]*gerritpb.FileInfo, len(ri.Files))
for i, fi := range ri.Files {
ret.Files[i] = fi.ToProto()
}
}
if ri.Commit != nil {
ret.Commit = ri.Commit.ToProto()
}
return ret
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#git-person-info
type gitPersonInfo struct {
Name string `json:"name"`
Email string `json:"email"`
}
func (g *gitPersonInfo) ToProto() *gerritpb.GitPersonInfo {
return &gerritpb.GitPersonInfo{
Name: g.Name,
Email: g.Email,
}
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#commit-info
type commitInfo struct {
Commit string `json:"commit"`
Parents []*commitInfo `json:"parents"`
Author *gitPersonInfo `json:"author"`
Committer *gitPersonInfo `json:"committer"`
Subject string `json:"subject"`
Message string `json:"message"`
}
func (c *commitInfo) ToProto() *gerritpb.CommitInfo {
parents := make([]*gerritpb.CommitInfo_Parent, len(c.Parents))
for i, p := range c.Parents {
parents[i] = &gerritpb.CommitInfo_Parent{Id: p.Commit}
}
return &gerritpb.CommitInfo{
Id: c.Commit,
Parents: parents,
Message: c.Message,
Author: c.Author.ToProto(),
// TODO(tandrii): support other fields once added.
}
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#related-change-and-commit-info
type relatedChangeAndCommitInfo struct {
Project string `json:"project"`
ChangeID string `json:"change_id"`
Commit commitInfo `json:"commit"`
Number int64 `json:"_change_number"`
Patchset int64 `json:"_revision_number"`
CurrentPatchset int64 `json:"_current_revision_number"`
// json.Unmarshal cannot convert enum string to value,
// so this field is handled specially in ToProto.
Status string `json:"status"`
}
func (r *relatedChangeAndCommitInfo) ToProto() *gerritpb.GetRelatedChangesResponse_ChangeAndCommit {
return &gerritpb.GetRelatedChangesResponse_Change
|
ToProto
|
identifier_name
|
json.go
|
Requirements[i], err = r.ToProto(); err != nil {
return nil, err
}
}
}
if ci.Reviewers != nil {
ret.Reviewers = &gerritpb.ReviewerStatusMap{}
if accs, exist := ci.Reviewers["REVIEWER"]; exist {
ret.Reviewers.Reviewers = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Reviewers[i] = acc.ToProto()
}
}
if accs, exist := ci.Reviewers["CC"]; exist {
ret.Reviewers.Ccs = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Ccs[i] = acc.ToProto()
}
}
if accs, exist := ci.Reviewers["REMOVED"]; exist {
ret.Reviewers.Removed = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Removed[i] = acc.ToProto()
}
}
}
return ret, nil
}
type labelInfo struct {
Optional bool `json:"optional"`
Approved *accountInfo `json:"approved"`
Rejected *accountInfo `json:"rejected"`
Recommended *accountInfo `json:"recommended"`
Disliked *accountInfo `json:"disliked"`
Blocking bool `json:"blocking"`
Value int32 `json:"value"`
DefaultValue int32 `json:"default_value"`
All []*approvalInfo `json:"all"`
Values map[string]string `json:"values"`
}
func (li *labelInfo) ToProto() *gerritpb.LabelInfo {
ret := &gerritpb.LabelInfo{
Optional: li.Optional,
Approved: li.Approved.ToProto(),
Rejected: li.Rejected.ToProto(),
Recommended: li.Recommended.ToProto(),
Disliked: li.Disliked.ToProto(),
Blocking: li.Blocking,
Value: li.Value,
DefaultValue: li.DefaultValue,
}
if len(li.All) > 0 {
ret.All = make([]*gerritpb.ApprovalInfo, len(li.All))
for i, a := range li.All {
ret.All[i] = a.ToProto()
}
}
if li.Values != nil {
ret.Values = make(map[int32]string, len(li.Values))
for value, description := range li.Values {
i, err := strconv.ParseInt(strings.TrimSpace(value), 10, 32)
// Error is silently ignored for consistency with other parts of code.
if err == nil {
ret.Values[int32(i)] = description
}
}
}
return ret
}
type approvalInfo struct {
accountInfo
Value int32 `json:"value"`
PermittedVotingRange *gerritpb.VotingRangeInfo `json:"permitted_voting_range"`
Date Timestamp `json:"date"`
Tag string `json:"tag"`
PostSubmit bool `json:"post_submit"`
}
func (ai *approvalInfo) ToProto() *gerritpb.ApprovalInfo {
ret := &gerritpb.ApprovalInfo{
User: ai.accountInfo.ToProto(),
Value: ai.Value,
PermittedVotingRange: ai.PermittedVotingRange,
Date: timestamppb.New(ai.Date.Time),
Tag: ai.Tag,
PostSubmit: ai.PostSubmit,
}
return ret
}
type changeMessageInfo struct {
ID string `json:"id"`
Author *accountInfo `json:"author"`
RealAuthor *accountInfo `json:"real_author"`
Date Timestamp `json:"date"`
Message string `json:"message"`
Tag string `json:"tag"`
}
func (cmi *changeMessageInfo) ToProto() *gerritpb.ChangeMessageInfo
|
type requirement struct {
Status string `json:"status"`
FallbackText string `json:"fallback_text"`
Type string `json:"type"`
}
func (r *requirement) ToProto() (*gerritpb.Requirement, error) {
stringVal := "REQUIREMENT_STATUS_" + r.Status
numVal, found := gerritpb.Requirement_Status_value[stringVal]
if !found {
return nil, errors.Reason("no Status enum value for %q", r.Status).Err()
}
return &gerritpb.Requirement{
Status: gerritpb.Requirement_Status(numVal),
FallbackText: r.FallbackText,
Type: r.Type,
}, nil
}
type fileInfo struct {
LinesInserted int32 `json:"lines_inserted"`
LinesDeleted int32 `json:"lines_deleted"`
SizeDelta int64 `json:"size_delta"`
Size int64 `json:"size"`
}
func (fi *fileInfo) ToProto() *gerritpb.FileInfo {
return &gerritpb.FileInfo{
LinesInserted: fi.LinesInserted,
LinesDeleted: fi.LinesDeleted,
SizeDelta: fi.SizeDelta,
Size: fi.Size,
}
}
type revisionInfo struct {
Kind string `json:"kind"`
Number int `json:"_number"`
Uploader *accountInfo `json:"uploader"`
Ref string `json:"ref"`
Created Timestamp `json:"created"`
Description string `json:"description"`
Files map[string]*fileInfo `json:"files"`
Commit *commitInfo `json:"commit"`
}
func (ri *revisionInfo) ToProto() *gerritpb.RevisionInfo {
ret := &gerritpb.RevisionInfo{
Number: int32(ri.Number),
Uploader: ri.Uploader.ToProto(),
Ref: ri.Ref,
Created: timestamppb.New(ri.Created.Time),
Description: ri.Description,
}
if v, ok := gerritpb.RevisionInfo_Kind_value[ri.Kind]; ok {
ret.Kind = gerritpb.RevisionInfo_Kind(v)
}
if ri.Files != nil {
ret.Files = make(map[string]*gerritpb.FileInfo, len(ri.Files))
for i, fi := range ri.Files {
ret.Files[i] = fi.ToProto()
}
}
if ri.Commit != nil {
ret.Commit = ri.Commit.ToProto()
}
return ret
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#git-person-info
type gitPersonInfo struct {
Name string `json:"name"`
Email string `json:"email"`
}
func (g *gitPersonInfo) ToProto() *gerritpb.GitPersonInfo {
return &gerritpb.GitPersonInfo{
Name: g.Name,
Email: g.Email,
}
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#commit-info
type commitInfo struct {
Commit string `json:"commit"`
Parents []*commitInfo `json:"parents"`
Author *gitPersonInfo `json:"author"`
Committer *gitPersonInfo `json:"committer"`
Subject string `json:"subject"`
Message string `json:"message"`
}
func (c *commitInfo) ToProto() *gerritpb.CommitInfo {
parents := make([]*gerritpb.CommitInfo_Parent, len(c.Parents))
for i, p := range c.Parents {
parents[i] = &gerritpb.CommitInfo_Parent{Id: p.Commit}
}
return &gerritpb.CommitInfo{
Id: c.Commit,
Parents: parents,
Message: c.Message,
Author: c.Author.ToProto(),
// TODO(tandrii): support other fields once added.
}
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#related-change-and-commit-info
type relatedChangeAndCommitInfo struct {
Project string `json:"project"`
ChangeID string `json:"change_id"`
Commit commitInfo `json:"commit"`
Number int64 `json:"_change_number"`
Patchset int64 `json:"_revision_number"`
CurrentPatchset int64 `json:"_current_revision_number"`
// json.Unmarshal cannot convert enum string to value,
// so this field is handled specially in ToProto.
Status string `json:"status"`
}
func (r *relatedChangeAndCommitInfo) ToProto() *gerritpb.GetRelatedChangesResponse_ChangeAndCommit {
return &gerritpb.GetRelatedChangesResponse_Change
|
{
if cmi == nil {
return nil
}
return &gerritpb.ChangeMessageInfo{
Id: cmi.ID,
Author: cmi.Author.ToProto(),
RealAuthor: cmi.RealAuthor.ToProto(),
Date: timestamppb.New(cmi.Date.Time),
Message: cmi.Message,
Tag: cmi.Tag,
}
}
|
identifier_body
|
json.go
|
.SubmitRequirements[i], err = r.ToProto(); err != nil {
return nil, err
}
}
}
if ci.Reviewers != nil {
ret.Reviewers = &gerritpb.ReviewerStatusMap{}
if accs, exist := ci.Reviewers["REVIEWER"]; exist {
ret.Reviewers.Reviewers = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Reviewers[i] = acc.ToProto()
}
}
if accs, exist := ci.Reviewers["CC"]; exist {
ret.Reviewers.Ccs = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Ccs[i] = acc.ToProto()
}
}
if accs, exist := ci.Reviewers["REMOVED"]; exist {
ret.Reviewers.Removed = make([]*gerritpb.AccountInfo, len(accs))
for i, acc := range accs {
ret.Reviewers.Removed[i] = acc.ToProto()
}
}
}
return ret, nil
}
type labelInfo struct {
Optional bool `json:"optional"`
Approved *accountInfo `json:"approved"`
Rejected *accountInfo `json:"rejected"`
Recommended *accountInfo `json:"recommended"`
Disliked *accountInfo `json:"disliked"`
Blocking bool `json:"blocking"`
Value int32 `json:"value"`
DefaultValue int32 `json:"default_value"`
All []*approvalInfo `json:"all"`
Values map[string]string `json:"values"`
}
func (li *labelInfo) ToProto() *gerritpb.LabelInfo {
ret := &gerritpb.LabelInfo{
Optional: li.Optional,
Approved: li.Approved.ToProto(),
Rejected: li.Rejected.ToProto(),
Recommended: li.Recommended.ToProto(),
Disliked: li.Disliked.ToProto(),
Blocking: li.Blocking,
Value: li.Value,
DefaultValue: li.DefaultValue,
}
if len(li.All) > 0 {
ret.All = make([]*gerritpb.ApprovalInfo, len(li.All))
for i, a := range li.All {
ret.All[i] = a.ToProto()
}
}
if li.Values != nil {
ret.Values = make(map[int32]string, len(li.Values))
for value, description := range li.Values {
i, err := strconv.ParseInt(strings.TrimSpace(value), 10, 32)
// Error is silently ignored for consistency with other parts of code.
if err == nil {
ret.Values[int32(i)] = description
}
}
}
return ret
}
type approvalInfo struct {
accountInfo
Value int32 `json:"value"`
PermittedVotingRange *gerritpb.VotingRangeInfo `json:"permitted_voting_range"`
Date Timestamp `json:"date"`
Tag string `json:"tag"`
PostSubmit bool `json:"post_submit"`
}
func (ai *approvalInfo) ToProto() *gerritpb.ApprovalInfo {
ret := &gerritpb.ApprovalInfo{
User: ai.accountInfo.ToProto(),
Value: ai.Value,
PermittedVotingRange: ai.PermittedVotingRange,
Date: timestamppb.New(ai.Date.Time),
Tag: ai.Tag,
PostSubmit: ai.PostSubmit,
}
return ret
}
type changeMessageInfo struct {
ID string `json:"id"`
Author *accountInfo `json:"author"`
RealAuthor *accountInfo `json:"real_author"`
Date Timestamp `json:"date"`
Message string `json:"message"`
Tag string `json:"tag"`
}
func (cmi *changeMessageInfo) ToProto() *gerritpb.ChangeMessageInfo {
if cmi == nil {
return nil
}
return &gerritpb.ChangeMessageInfo{
Id: cmi.ID,
Author: cmi.Author.ToProto(),
RealAuthor: cmi.RealAuthor.ToProto(),
Date: timestamppb.New(cmi.Date.Time),
Message: cmi.Message,
Tag: cmi.Tag,
}
}
type requirement struct {
Status string `json:"status"`
FallbackText string `json:"fallback_text"`
Type string `json:"type"`
}
func (r *requirement) ToProto() (*gerritpb.Requirement, error) {
stringVal := "REQUIREMENT_STATUS_" + r.Status
numVal, found := gerritpb.Requirement_Status_value[stringVal]
if !found {
return nil, errors.Reason("no Status enum value for %q", r.Status).Err()
}
return &gerritpb.Requirement{
Status: gerritpb.Requirement_Status(numVal),
FallbackText: r.FallbackText,
Type: r.Type,
}, nil
}
type fileInfo struct {
LinesInserted int32 `json:"lines_inserted"`
LinesDeleted int32 `json:"lines_deleted"`
SizeDelta int64 `json:"size_delta"`
Size int64 `json:"size"`
}
func (fi *fileInfo) ToProto() *gerritpb.FileInfo {
return &gerritpb.FileInfo{
LinesInserted: fi.LinesInserted,
LinesDeleted: fi.LinesDeleted,
SizeDelta: fi.SizeDelta,
Size: fi.Size,
}
}
type revisionInfo struct {
Kind string `json:"kind"`
Number int `json:"_number"`
Uploader *accountInfo `json:"uploader"`
Ref string `json:"ref"`
Created Timestamp `json:"created"`
Description string `json:"description"`
Files map[string]*fileInfo `json:"files"`
Commit *commitInfo `json:"commit"`
}
func (ri *revisionInfo) ToProto() *gerritpb.RevisionInfo {
ret := &gerritpb.RevisionInfo{
Number: int32(ri.Number),
Uploader: ri.Uploader.ToProto(),
Ref: ri.Ref,
Created: timestamppb.New(ri.Created.Time),
Description: ri.Description,
}
|
if ri.Files != nil {
ret.Files = make(map[string]*gerritpb.FileInfo, len(ri.Files))
for i, fi := range ri.Files {
ret.Files[i] = fi.ToProto()
}
}
if ri.Commit != nil {
ret.Commit = ri.Commit.ToProto()
}
return ret
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#git-person-info
type gitPersonInfo struct {
Name string `json:"name"`
Email string `json:"email"`
}
func (g *gitPersonInfo) ToProto() *gerritpb.GitPersonInfo {
return &gerritpb.GitPersonInfo{
Name: g.Name,
Email: g.Email,
}
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#commit-info
type commitInfo struct {
Commit string `json:"commit"`
Parents []*commitInfo `json:"parents"`
Author *gitPersonInfo `json:"author"`
Committer *gitPersonInfo `json:"committer"`
Subject string `json:"subject"`
Message string `json:"message"`
}
func (c *commitInfo) ToProto() *gerritpb.CommitInfo {
parents := make([]*gerritpb.CommitInfo_Parent, len(c.Parents))
for i, p := range c.Parents {
parents[i] = &gerritpb.CommitInfo_Parent{Id: p.Commit}
}
return &gerritpb.CommitInfo{
Id: c.Commit,
Parents: parents,
Message: c.Message,
Author: c.Author.ToProto(),
// TODO(tandrii): support other fields once added.
}
}
// https://gerrit-review.googlesource.com/Documentation/rest-api-changes.html#related-change-and-commit-info
type relatedChangeAndCommitInfo struct {
Project string `json:"project"`
ChangeID string `json:"change_id"`
Commit commitInfo `json:"commit"`
Number int64 `json:"_change_number"`
Patchset int64 `json:"_revision_number"`
CurrentPatchset int64 `json:"_current_revision_number"`
// json.Unmarshal cannot convert enum string to value,
// so this field is handled specially in ToProto.
Status string `json:"status"`
}
func (r *relatedChangeAndCommitInfo) ToProto() *gerritpb.GetRelatedChangesResponse_ChangeAndCommit {
return &gerritpb.GetRelatedChangesResponse_ChangeAnd
|
if v, ok := gerritpb.RevisionInfo_Kind_value[ri.Kind]; ok {
ret.Kind = gerritpb.RevisionInfo_Kind(v)
}
|
random_line_split
|
json.go
|
mergeable_into"`
}
func (mi *mergeableInfo) ToProto() (*gerritpb.MergeableInfo, error) {
// Convert something like 'simple-two-way-in-core' to 'SIMPLE_TWO_WAY_IN_CORE'.
strategyEnumName := strings.Replace(strings.ToUpper(mi.Strategy), "-", "_", -1)
strategyEnumNum, found := gerritpb.MergeableStrategy_value[strategyEnumName]
if !found {
return nil, errors.Reason("no MergeableStrategy enum value for %q", strategyEnumName).Err()
}
submitTypeEnumNum, found := gerritpb.MergeableInfo_SubmitType_value[mi.SubmitType]
if !found {
return nil, errors.Reason("no SubmitType enum value for %q", mi.SubmitType).Err()
}
return &gerritpb.MergeableInfo{
SubmitType: gerritpb.MergeableInfo_SubmitType(submitTypeEnumNum),
Strategy: gerritpb.MergeableStrategy(strategyEnumNum),
Mergeable: mi.Mergeable,
CommitMerged: mi.CommitMerged,
ContentMerged: mi.ContentMerged,
Conflicts: mi.Conflicts,
MergeableInto: mi.MergeableInto,
}, nil
}
type addReviewerRequest struct {
Reviewer string `json:"reviewer"`
State string `json:"state,omitempty"`
Confirmed bool `json:"confirmed,omitempty"`
Notify string `json:"notify,omitempty"`
}
type reviewerInfo struct {
Name string `json:"name,omitempty"`
Email string `json:"email,omitempty"`
SecondaryEmails []string `json:"secondary_emails,omitempty"`
Username string `json:"username,omitempty"`
Approvals map[string]string `json:"approvals,omitempty"`
AccountID int64 `json:"_account_id,omitempty"`
}
func (ri *reviewerInfo) ToProtoReviewerInfo() (*gerritpb.ReviewerInfo, error) {
approvals := make(map[string]int32, 0)
for label, score := range ri.Approvals {
score = strings.TrimLeft(score, " ")
scoreInt, err := strconv.ParseInt(score, 10, 32)
if err != nil {
return nil, errors.Annotate(err, "parsing approvals").Err()
}
approvals[label] = int32(scoreInt)
}
return &gerritpb.ReviewerInfo{
Account: &gerritpb.AccountInfo{
Name: ri.Name,
Email: ri.Email,
SecondaryEmails: ri.SecondaryEmails,
Username: ri.Username,
AccountId: ri.AccountID,
},
Approvals: approvals,
}, nil
}
type addReviewerResult struct {
Input string `json:"input"`
Reviewers []reviewerInfo `json:"reviewers,omitempty"`
Ccs []reviewerInfo `json:"ccs,omitempty"`
Error string `json:"error,omitempty"`
Confirm bool `json:"confirm,omitempty"`
}
func (rr *addReviewerResult) ToProto() (*gerritpb.AddReviewerResult, error) {
reviewers := make([]*gerritpb.ReviewerInfo, 0)
for _, r := range rr.Reviewers {
rInfo, err := r.ToProtoReviewerInfo()
if err != nil {
return nil, errors.Annotate(err, "converting reviewerInfo").Err()
}
reviewers = append(reviewers, rInfo)
}
ccs := make([]*gerritpb.ReviewerInfo, 0)
for _, r := range rr.Ccs {
rInfo, err := r.ToProtoReviewerInfo()
if err != nil {
return nil, errors.Annotate(err, "converting reviewerInfo").Err()
}
ccs = append(ccs, rInfo)
}
return &gerritpb.AddReviewerResult{
Input: rr.Input,
Reviewers: reviewers,
Ccs: ccs,
Error: rr.Error,
Confirm: rr.Confirm,
}, nil
}
func enumToString(v int32, m map[int32]string) string {
if v == 0 {
return ""
}
prefixLen := strings.LastIndex(m[0], "UNSPECIFIED")
return m[v][prefixLen:]
}
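// Self-contained sketch of the prefix-stripping trick in enumToString above:
// proto enums name their zero value "<PREFIX>_UNSPECIFIED", so the index of
// "UNSPECIFIED" in the zero-value name equals the length of the shared prefix
// to drop from every other name. The map below is a stand-in for a generated
// *_name map such as gerritpb.NotifyDetails_RecipientType_name.
package main

import (
	"fmt"
	"strings"
)

func main() {
	names := map[int32]string{
		0: "RECIPIENT_TYPE_UNSPECIFIED",
		1: "RECIPIENT_TYPE_TO",
		2: "RECIPIENT_TYPE_CC",
	}
	prefixLen := strings.LastIndex(names[0], "UNSPECIFIED")
	fmt.Println(names[1][prefixLen:]) // TO
	fmt.Println(names[2][prefixLen:]) // CC
}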
type reviewInput struct {
Message string `json:"message,omitempty"`
Labels map[string]int32 `json:"labels,omitempty"`
Tag string `json:"tag,omitempty"`
Notify string `json:"notify,omitempty"`
NotifyDetails notifyDetails `json:"notify_details,omitempty"`
OnBehalfOf int64 `json:"on_behalf_of,omitempty"`
Ready bool `json:"ready,omitempty"`
WorkInProgress bool `json:"work_in_progress,omitempty"`
AddToAttentionSet []*attentionSetInput `json:"add_to_attention_set,omitempty"`
RemoveFromAttentionSet []*attentionSetInput `json:"remove_from_attention_set,omitempty"`
IgnoreAutomaticAttentionSetRules bool `json:"ignore_automatic_attention_set_rules,omitempty"`
Reviewers []*reviewerInput `json:"reviewers,omitempty"`
}
type notifyInfo struct {
Accounts []int64 `json:"accounts,omitempty"`
}
type notifyDetails map[string]*notifyInfo
func toNotifyDetails(in *gerritpb.NotifyDetails) notifyDetails {
recipients := in.GetRecipients()
if len(recipients) == 0 {
return nil
}
res := make(map[string]*notifyInfo, len(recipients))
for _, recipient := range recipients {
if len(recipient.Info.GetAccounts()) == 0 {
continue
}
rt := recipient.RecipientType
if rt == gerritpb.NotifyDetails_RECIPIENT_TYPE_UNSPECIFIED {
// Must have been caught in validation.
panic(fmt.Errorf("must specify recipient type"))
}
rts := enumToString(int32(rt.Number()), gerritpb.NotifyDetails_RecipientType_name)
if ni, ok := res[rts]; !ok {
ni = &notifyInfo{
Accounts: make([]int64, len(recipient.Info.GetAccounts())),
}
for i, aid := range recipient.Info.GetAccounts() {
ni.Accounts[i] = aid
}
res[rts] = ni
} else {
ni.Accounts = append(ni.Accounts, recipient.Info.GetAccounts()...)
}
}
for _, ni := range res {
// Sort & dedup accounts in each notification bucket.
sort.Slice(ni.Accounts, func(i, j int) bool { return ni.Accounts[i] < ni.Accounts[j] })
n := 0
for i := 1; i < len(ni.Accounts); i++ {
if ni.Accounts[n] == ni.Accounts[i] {
continue
}
n++
ni.Accounts[n] = ni.Accounts[i]
}
ni.Accounts = ni.Accounts[:n+1]
}
return res
}
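// Self-contained sketch of the sort-and-compact dedup used at the end of
// toNotifyDetails: after sorting, a write index n advances only when a new
// value appears, so duplicates are squeezed out in place.
package main

import (
	"fmt"
	"sort"
)

func sortAndDedup(accounts []int64) []int64 {
	if len(accounts) == 0 {
		return accounts
	}
	sort.Slice(accounts, func(i, j int) bool { return accounts[i] < accounts[j] })
	n := 0
	for i := 1; i < len(accounts); i++ {
		if accounts[n] == accounts[i] {
			continue // duplicate of the last kept value
		}
		n++
		accounts[n] = accounts[i]
	}
	return accounts[:n+1]
}

func main() {
	fmt.Println(sortAndDedup([]int64{7, 3, 7, 1, 3})) // [1 3 7]
}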
type attentionSetInput struct {
User string `json:"user"`
Reason string `json:"reason"`
Notify string `json:"notify,omitempty"`
NotifyDetails notifyDetails `json:"notify_details,omitempty"`
}
func toAttentionSetInput(in *gerritpb.AttentionSetInput) *attentionSetInput {
return &attentionSetInput{
User: in.User,
Reason: in.Reason,
Notify: enumToString(int32(in.Notify.Number()), gerritpb.Notify_name),
NotifyDetails: toNotifyDetails(in.NotifyDetails),
}
}
func toAttentionSetInputs(in []*gerritpb.AttentionSetInput) []*attentionSetInput {
if len(in) == 0 {
return nil
}
out := make([]*attentionSetInput, len(in))
for i, x := range in {
out[i] = toAttentionSetInput(x)
}
return out
}
type reviewerInput struct {
Reviewer string `json:"reviewer"`
State string `json:"state,omitempty"`
}
func toReviewerInputs(in []*gerritpb.ReviewerInput) []*reviewerInput {
if len(in) == 0 {
return nil
}
out := make([]*reviewerInput, len(in))
for i, x := range in {
out[i] = &reviewerInput{
Reviewer: x.Reviewer,
State: enumToString(int32(x.State.Number()), gerritpb.ReviewerInput_State_name),
}
}
return out
}
type reviewResult struct {
Labels map[string]int32 `json:"labels,omitempty"`
Reviewers map[string]*addReviewerResult `json:"reviewers,omitempty"`
}
func (rr *reviewResult) ToProto() (*gerritpb.ReviewResult, error) {
result := &gerritpb.ReviewResult{
Labels: rr.Labels,
}
if len(rr.Reviewers) == 0 {
return result, nil
}
reviewers := make(map[string]*gerritpb.AddReviewerResult, len(rr.Reviewers))
for i, x := range rr.Reviewers
|
{
reviewerDetails, err := x.ToProto()
if err != nil {
return nil, err
}
reviewers[i] = reviewerDetails
}
|
conditional_block
|
|
list_conflict_files_test.go
|
byte("encoding/codagé"),
},
Content: []byte(conflictContent1),
},
{
Header: &gitalypb.ConflictFileHeader{
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/feature.rb"),
TheirPath: []byte("files/ruby/feature.rb"),
},
Content: []byte(conflictContent2),
},
}
receivedFiles := getConflictFiles(t, c)
require.Len(t, receivedFiles, len(expectedFiles))
for i := 0; i < len(expectedFiles); i++ {
testhelper.ProtoEqual(t, receivedFiles[i].Header, expectedFiles[i].Header)
require.Equal(t, expectedFiles[i].Content, receivedFiles[i].Content)
}
}
func TestSuccessfulListConflictFilesRequestWithAncestor(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
ourCommitOid := "824be604a34828eb682305f0d963056cfac87b2d"
theirCommitOid := "1450cd639e0bc6721eb02800169e464f212cde06"
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: ourCommitOid,
TheirCommitOid: theirCommitOid,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
expectedFiles := []*conflictFile{
{
Header: &gitalypb.ConflictFileHeader{
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/popen.rb"),
TheirPath: []byte("files/ruby/popen.rb"),
AncestorPath: []byte("files/ruby/popen.rb"),
},
},
{
Header: &gitalypb.ConflictFileHeader{
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/regex.rb"),
TheirPath: []byte("files/ruby/regex.rb"),
AncestorPath: []byte("files/ruby/regex.rb"),
},
},
}
receivedFiles := getConflictFiles(t, c)
require.Len(t, receivedFiles, len(expectedFiles))
for i := 0; i < len(expectedFiles); i++ {
testhelper.ProtoEqual(t, receivedFiles[i].Header, expectedFiles[i].Header)
}
}
func TestListConflictFilesHugeDiff(t *testing.T) {
ctx := testhelper.Context(t)
cfg, repo, repoPath, client := SetupConflictsService(ctx, t, false, nil)
our := buildCommit(t, ctx, cfg, repo, repoPath, map[string][]byte{
"a": bytes.Repeat([]byte("a\n"), 128*1024),
"b": bytes.Repeat([]byte("b\n"), 128*1024),
})
their := buildCommit(t, ctx, cfg, repo, repoPath, map[string][]byte{
"a": bytes.Repeat([]byte("x\n"), 128*1024),
"b": bytes.Repeat([]byte("y\n"), 128*1024),
})
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: our,
TheirCommitOid: their,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
receivedFiles := getConflictFiles(t, c)
require.Len(t, receivedFiles, 2)
testhelper.ProtoEqual(t, &gitalypb.ConflictFileHeader{
CommitOid: our,
OurMode: int32(0o100644),
OurPath: []byte("a"),
TheirPath: []byte("a"),
}, receivedFiles[0].Header)
testhelper.ProtoEqual(t, &gitalypb.ConflictFileHeader{
CommitOid: our,
OurMode: int32(0o100644),
OurPath: []byte("b"),
TheirPath: []byte("b"),
}, receivedFiles[1].Header)
}
func buildCommit(t *testing.T, ctx context.Context, cfg config.Cfg, repo *gitalypb.Repository, repoPath string, files map[string][]byte) string {
t.Helper()
for file, contents := range files {
filePath := filepath.Join(repoPath, file)
require.NoError(t, os.WriteFile(filePath, contents, 0o666))
gittest.Exec(t, cfg, "-C", repoPath, "add", filePath)
}
gittest.Exec(t, cfg, "-C", repoPath, "commit", "-m", "message")
oid, err := localrepo.NewTestRepo(t, cfg, repo).ResolveRevision(ctx, git.Revision("HEAD"))
require.NoError(t, err)
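// Resetting to HEAD~ below moves the branch back one commit; the commit just
// created stays in the object database, reachable only via the returned OID.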
gittest.Exec(t, cfg, "-C", repoPath, "reset", "--hard", "HEAD~")
return oid.String()
}
func TestListConflictFilesFailedPrecondition(t *testing.T) {
|
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
testCases := []struct {
desc string
ourCommitOid string
theirCommitOid string
}{
{
desc: "conflict side missing",
ourCommitOid: "eb227b3e214624708c474bdab7bde7afc17cefcc",
theirCommitOid: "824be604a34828eb682305f0d963056cfac87b2d",
},
{
// These commits have a conflict on the 'VERSION' file in the test repo.
// The conflict is expected to raise an encoding error.
desc: "encoding error",
ourCommitOid: "bd493d44ae3c4dd84ce89cb75be78c4708cbd548",
theirCommitOid: "7df99c9ad5b8c9bfc5ae4fb7a91cc87adcce02ef",
},
{
desc: "submodule object lookup error",
ourCommitOid: "de78448b0b504f3f60093727bddfda1ceee42345",
theirCommitOid: "2f61d70f862c6a4f782ef7933e020a118282db29",
},
{
desc: "invalid commit id on 'our' side",
ourCommitOid: "abcdef0000000000000000000000000000000000",
theirCommitOid: "1a35b5a77cf6af7edf6703f88e82f6aff613666f",
},
{
desc: "invalid commit id on 'their' side",
ourCommitOid: "1a35b5a77cf6af7edf6703f88e82f6aff613666f",
theirCommitOid: "abcdef0000000000000000000000000000000000",
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: tc.ourCommitOid,
TheirCommitOid: tc.theirCommitOid,
}
c, err := client.ListConflictFiles(ctx, request)
if err == nil {
err = drainListConflictFilesResponse(c)
}
testhelper.RequireGrpcCode(t, err, codes.FailedPrecondition)
})
}
}
func TestListConflictFilesAllowTreeConflicts(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx
|
random_line_split
|
|
list_conflict_files_test.go
|
(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, false, nil)
ourCommitOid := "1a35b5a77cf6af7edf6703f88e82f6aff613666f"
theirCommitOid := "8309e68585b28d61eb85b7e2834849dda6bf1733"
conflictContent1 := `<<<<<<< encoding/codagé
Content is not important, file name is
=======
Content can be important, but here, file name is of utmost importance
>>>>>>> encoding/codagé
`
conflictContent2 := `<<<<<<< files/ruby/feature.rb
class Feature
def foo
puts 'bar'
end
=======
# This file was changed in feature branch
# We put different code here to make merge conflict
class Conflict
>>>>>>> files/ruby/feature.rb
end
`
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: ourCommitOid,
TheirCommitOid: theirCommitOid,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
expectedFiles := []*conflictFile{
{
Header: &gitalypb.ConflictFileHeader{
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("encoding/codagé"),
TheirPath: []byte("encoding/codagé"),
},
Content: []byte(conflictContent1),
},
{
Header: &gitalypb.ConflictFileHeader{
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/feature.rb"),
TheirPath: []byte("files/ruby/feature.rb"),
},
Content: []byte(conflictContent2),
},
}
receivedFiles := getConflictFiles(t, c)
require.Len(t, receivedFiles, len(expectedFiles))
for i := 0; i < len(expectedFiles); i++ {
testhelper.ProtoEqual(t, receivedFiles[i].Header, expectedFiles[i].Header)
require.Equal(t, expectedFiles[i].Content, receivedFiles[i].Content)
}
}
func TestSuccessfulListConflictFilesRequestWithAncestor(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
ourCommitOid := "824be604a34828eb682305f0d963056cfac87b2d"
theirCommitOid := "1450cd639e0bc6721eb02800169e464f212cde06"
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: ourCommitOid,
TheirCommitOid: theirCommitOid,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
expectedFiles := []*conflictFile{
{
Header: &gitalypb.ConflictFileHeader{
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/popen.rb"),
TheirPath: []byte("files/ruby/popen.rb"),
AncestorPath: []byte("files/ruby/popen.rb"),
},
},
{
Header: &gitalypb.ConflictFileHeader{
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/regex.rb"),
TheirPath: []byte("files/ruby/regex.rb"),
AncestorPath: []byte("files/ruby/regex.rb"),
},
},
}
receivedFiles := getConflictFiles(t, c)
require.Len(t, receivedFiles, len(expectedFiles))
for i := 0; i < len(expectedFiles); i++ {
testhelper.ProtoEqual(t, receivedFiles[i].Header, expectedFiles[i].Header)
}
}
func TestListConflictFilesHugeDiff(t *testing.T) {
ctx := testhelper.Context(t)
cfg, repo, repoPath, client := SetupConflictsService(ctx, t, false, nil)
our := buildCommit(t, ctx, cfg, repo, repoPath, map[string][]byte{
"a": bytes.Repeat([]byte("a\n"), 128*1024),
"b": bytes.Repeat([]byte("b\n"), 128*1024),
})
their := buildCommit(t, ctx, cfg, repo, repoPath, map[string][]byte{
"a": bytes.Repeat([]byte("x\n"), 128*1024),
"b": bytes.Repeat([]byte("y\n"), 128*1024),
})
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: our,
TheirCommitOid: their,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
receivedFiles := getConflictFiles(t, c)
require.Len(t, receivedFiles, 2)
testhelper.ProtoEqual(t, &gitalypb.ConflictFileHeader{
CommitOid: our,
OurMode: int32(0o100644),
OurPath: []byte("a"),
TheirPath: []byte("a"),
}, receivedFiles[0].Header)
testhelper.ProtoEqual(t, &gitalypb.ConflictFileHeader{
CommitOid: our,
OurMode: int32(0o100644),
OurPath: []byte("b"),
TheirPath: []byte("b"),
}, receivedFiles[1].Header)
}
func buildCommit(t *testing.T, ctx context.Context, cfg config.Cfg, repo *gitalypb.Repository, repoPath string, files map[string][]byte) string {
t.Helper()
for file, contents := range files {
filePath := filepath.Join(repoPath, file)
require.NoError(t, os.WriteFile(filePath, contents, 0o666))
gittest.Exec(t, cfg, "-C", repoPath, "add", filePath)
}
gittest.Exec(t, cfg, "-C", repoPath, "commit", "-m", "message")
oid, err := localrepo.NewTestRepo(t, cfg, repo).ResolveRevision(ctx, git.Revision("HEAD"))
require.NoError(t, err)
gittest.Exec(t, cfg, "-C", repoPath, "reset", "--hard", "HEAD~")
return oid.String()
}
func TestListConflictFilesFailedPrecondition(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
testCases := []struct {
desc string
ourCommitOid string
theirCommitOid string
}{
{
desc: "conflict side missing",
ourCommitOid: "eb227b3e214624708c474bdab7bde7afc17cefcc",
theirCommitOid: "824be604a34828eb682305f0d963056cfac87b2d",
},
{
// These commits have a conflict on the 'VERSION' file in the test repo.
// The conflict is expected to raise an encoding error.
desc: "encoding error",
ourCommitOid: "bd493d44ae3c4dd84ce89cb75be78c4708cbd548",
theirCommitOid: "7df99c9ad5b8c9bfc5ae4fb7a91cc87adcce02ef",
},
{
desc: "submodule object lookup error",
ourCommitOid: "de78448b0b504f3f60093727bddfda1ceee42345",
theirCommitOid: "2f61d70f862c6a4f782ef7933e020a118282db29",
},
{
desc:
|
TestSuccessfulListConflictFilesRequest
|
identifier_name
|
|
list_conflict_files_test.go
|
repoPath, "reset", "--hard", "HEAD~")
return oid.String()
}
func TestListConflictFilesFailedPrecondition(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
testCases := []struct {
desc string
ourCommitOid string
theirCommitOid string
}{
{
desc: "conflict side missing",
ourCommitOid: "eb227b3e214624708c474bdab7bde7afc17cefcc",
theirCommitOid: "824be604a34828eb682305f0d963056cfac87b2d",
},
{
// These commits have a conflict on the 'VERSION' file in the test repo.
// The conflict is expected to raise an encoding error.
desc: "encoding error",
ourCommitOid: "bd493d44ae3c4dd84ce89cb75be78c4708cbd548",
theirCommitOid: "7df99c9ad5b8c9bfc5ae4fb7a91cc87adcce02ef",
},
{
desc: "submodule object lookup error",
ourCommitOid: "de78448b0b504f3f60093727bddfda1ceee42345",
theirCommitOid: "2f61d70f862c6a4f782ef7933e020a118282db29",
},
{
desc: "invalid commit id on 'our' side",
ourCommitOid: "abcdef0000000000000000000000000000000000",
theirCommitOid: "1a35b5a77cf6af7edf6703f88e82f6aff613666f",
},
{
desc: "invalid commit id on 'their' side",
ourCommitOid: "1a35b5a77cf6af7edf6703f88e82f6aff613666f",
theirCommitOid: "abcdef0000000000000000000000000000000000",
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: tc.ourCommitOid,
TheirCommitOid: tc.theirCommitOid,
}
c, err := client.ListConflictFiles(ctx, request)
if err == nil {
err = drainListConflictFilesResponse(c)
}
testhelper.RequireGrpcCode(t, err, codes.FailedPrecondition)
})
}
}
func TestListConflictFilesAllowTreeConflicts(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
ourCommitOid := "eb227b3e214624708c474bdab7bde7afc17cefcc"
theirCommitOid := "824be604a34828eb682305f0d963056cfac87b2d"
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: ourCommitOid,
TheirCommitOid: theirCommitOid,
AllowTreeConflicts: true,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
conflictContent := `<<<<<<< files/ruby/version_info.rb
module Gitlab
class VersionInfo
include Comparable
attr_reader :major, :minor, :patch
def self.parse(str)
if str && m = str.match(%r{(\d+)\.(\d+)\.(\d+)})
VersionInfo.new(m[1].to_i, m[2].to_i, m[3].to_i)
else
VersionInfo.new
end
end
def initialize(major = 0, minor = 0, patch = 0)
@major = major
@minor = minor
@patch = patch
end
def <=>(other)
return unless other.is_a? VersionInfo
return unless valid? && other.valid?
if other.major < @major
1
elsif @major < other.major
-1
elsif other.minor < @minor
1
elsif @minor < other.minor
-1
elsif other.patch < @patch
1
elsif @patch < other.patch
25
else
0
end
end
def to_s
if valid?
"%d.%d.%d" % [@major, @minor, @patch]
else
"Unknown"
end
end
def valid?
@major >= 0 && @minor >= 0 && @patch >= 0 && @major + @minor + @patch > 0
end
end
end
=======
>>>>>>>
`
expectedFiles := []*conflictFile{
{
Header: &gitalypb.ConflictFileHeader{
AncestorPath: []byte("files/ruby/version_info.rb"),
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/version_info.rb"),
},
Content: []byte(conflictContent),
},
}
testhelper.ProtoEqual(t, expectedFiles, getConflictFiles(t, c))
}
func TestFailedListConflictFilesRequestDueToValidation(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
ourCommitOid := "0b4bc9a49b562e85de7cc9e834518ea6828729b9"
theirCommitOid := "bb5206fee213d983da88c47f9cf4cc6caf9c66dc"
testCases := []struct {
desc string
request *gitalypb.ListConflictFilesRequest
code codes.Code
}{
{
desc: "empty repo",
request: &gitalypb.ListConflictFilesRequest{
Repository: nil,
OurCommitOid: ourCommitOid,
TheirCommitOid: theirCommitOid,
},
code: codes.InvalidArgument,
},
{
desc: "empty OurCommitId field",
request: &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: "",
TheirCommitOid: theirCommitOid,
},
code: codes.InvalidArgument,
},
{
desc: "empty TheirCommitId field",
request: &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: ourCommitOid,
TheirCommitOid: "",
},
code: codes.InvalidArgument,
},
}
for _, testCase := range testCases {
t.Run(testCase.desc, func(t *testing.T) {
c, _ := client.ListConflictFiles(ctx, testCase.request)
testhelper.RequireGrpcCode(t, drainListConflictFilesResponse(c), testCase.code)
})
}
}
func getConflictFiles(t *testing.T, c gitalypb.ConflictsService_ListConflictFilesClient) []*conflictFile {
t.Helper()
var files []*conflictFile
var currentFile *conflictFile
for {
|
r, err := c.Recv()
if err == io.EOF {
break
}
require.NoError(t, err)
for _, file := range r.GetFiles() {
// If there's a header this is the beginning of a new file
if header := file.GetHeader(); header != nil {
if currentFile != nil {
files = append(files, currentFile)
}
currentFile = &conflictFile{Header: header}
} else {
// Append to current file's content
currentFile.Content = append(currentFile.Content, file.GetContent()...)
}
}
}
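// Illustrative sketch (an assumption, not from the source file): the
// header/content chunk-reassembly pattern used by getConflictFiles, with
// plain structs standing in for the gitalypb stream messages. A non-empty
// header opens a new file; content chunks append to the most recently
// opened file.
package main

import "fmt"

type chunk struct {
	header  string // non-empty: start a new file with this name
	content []byte // otherwise: data for the current file
}

type file struct {
	name    string
	content []byte
}

func reassemble(chunks []chunk) []file {
	var files []file
	var current *file
	for _, c := range chunks {
		if c.header != "" {
			if current != nil {
				files = append(files, *current)
			}
			current = &file{name: c.header}
		} else if current != nil {
			current.content = append(current.content, c.content...)
		}
	}
	if current != nil {
		files = append(files, *current) // flush the last open file
	}
	return files
}

func main() {
	out := reassemble([]chunk{
		{header: "a"}, {content: []byte("he")}, {content: []byte("llo")},
		{header: "b"}, {content: []byte("hi")},
	})
	for _, f := range out {
		fmt.Printf("%s: %s\n", f.name, f.content)
	}
}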
|
conditional_block
|
|
list_conflict_files_test.go
|
b.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: our,
TheirCommitOid: their,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
receivedFiles := getConflictFiles(t, c)
require.Len(t, receivedFiles, 2)
testhelper.ProtoEqual(t, &gitalypb.ConflictFileHeader{
CommitOid: our,
OurMode: int32(0o100644),
OurPath: []byte("a"),
TheirPath: []byte("a"),
}, receivedFiles[0].Header)
testhelper.ProtoEqual(t, &gitalypb.ConflictFileHeader{
CommitOid: our,
OurMode: int32(0o100644),
OurPath: []byte("b"),
TheirPath: []byte("b"),
}, receivedFiles[1].Header)
}
func buildCommit(t *testing.T, ctx context.Context, cfg config.Cfg, repo *gitalypb.Repository, repoPath string, files map[string][]byte) string {
t.Helper()
for file, contents := range files {
filePath := filepath.Join(repoPath, file)
require.NoError(t, os.WriteFile(filePath, contents, 0o666))
gittest.Exec(t, cfg, "-C", repoPath, "add", filePath)
}
gittest.Exec(t, cfg, "-C", repoPath, "commit", "-m", "message")
oid, err := localrepo.NewTestRepo(t, cfg, repo).ResolveRevision(ctx, git.Revision("HEAD"))
require.NoError(t, err)
gittest.Exec(t, cfg, "-C", repoPath, "reset", "--hard", "HEAD~")
return oid.String()
}
func TestListConflictFilesFailedPrecondition(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
testCases := []struct {
desc string
ourCommitOid string
theirCommitOid string
}{
{
desc: "conflict side missing",
ourCommitOid: "eb227b3e214624708c474bdab7bde7afc17cefcc",
theirCommitOid: "824be604a34828eb682305f0d963056cfac87b2d",
},
{
// These commits have a conflict on the 'VERSION' file in the test repo.
// The conflict is expected to raise an encoding error.
desc: "encoding error",
ourCommitOid: "bd493d44ae3c4dd84ce89cb75be78c4708cbd548",
theirCommitOid: "7df99c9ad5b8c9bfc5ae4fb7a91cc87adcce02ef",
},
{
desc: "submodule object lookup error",
ourCommitOid: "de78448b0b504f3f60093727bddfda1ceee42345",
theirCommitOid: "2f61d70f862c6a4f782ef7933e020a118282db29",
},
{
desc: "invalid commit id on 'our' side",
ourCommitOid: "abcdef0000000000000000000000000000000000",
theirCommitOid: "1a35b5a77cf6af7edf6703f88e82f6aff613666f",
},
{
desc: "invalid commit id on 'their' side",
ourCommitOid: "1a35b5a77cf6af7edf6703f88e82f6aff613666f",
theirCommitOid: "abcdef0000000000000000000000000000000000",
},
}
for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: tc.ourCommitOid,
TheirCommitOid: tc.theirCommitOid,
}
c, err := client.ListConflictFiles(ctx, request)
if err == nil {
err = drainListConflictFilesResponse(c)
}
testhelper.RequireGrpcCode(t, err, codes.FailedPrecondition)
})
}
}
func TestListConflictFilesAllowTreeConflicts(t *testing.T) {
ctx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
ourCommitOid := "eb227b3e214624708c474bdab7bde7afc17cefcc"
theirCommitOid := "824be604a34828eb682305f0d963056cfac87b2d"
request := &gitalypb.ListConflictFilesRequest{
Repository: repo,
OurCommitOid: ourCommitOid,
TheirCommitOid: theirCommitOid,
AllowTreeConflicts: true,
}
c, err := client.ListConflictFiles(ctx, request)
require.NoError(t, err)
conflictContent := `<<<<<<< files/ruby/version_info.rb
module Gitlab
class VersionInfo
include Comparable
attr_reader :major, :minor, :patch
def self.parse(str)
if str && m = str.match(%r{(\d+)\.(\d+)\.(\d+)})
VersionInfo.new(m[1].to_i, m[2].to_i, m[3].to_i)
else
VersionInfo.new
end
end
def initialize(major = 0, minor = 0, patch = 0)
@major = major
@minor = minor
@patch = patch
end
def <=>(other)
return unless other.is_a? VersionInfo
return unless valid? && other.valid?
if other.major < @major
1
elsif @major < other.major
-1
elsif other.minor < @minor
1
elsif @minor < other.minor
-1
elsif other.patch < @patch
1
elsif @patch < other.patch
25
else
0
end
end
def to_s
if valid?
"%d.%d.%d" % [@major, @minor, @patch]
else
"Unknown"
end
end
def valid?
@major >= 0 && @minor >= 0 && @patch >= 0 && @major + @minor + @patch > 0
end
end
end
=======
>>>>>>>
`
expectedFiles := []*conflictFile{
{
Header: &gitalypb.ConflictFileHeader{
AncestorPath: []byte("files/ruby/version_info.rb"),
CommitOid: ourCommitOid,
OurMode: int32(0o100644),
OurPath: []byte("files/ruby/version_info.rb"),
},
Content: []byte(conflictContent),
},
}
testhelper.ProtoEqual(t, expectedFiles, getConflictFiles(t, c))
}
func TestFailedListConflictFilesRequestDueToValidation(t *testing.T) {
c
|
tx := testhelper.Context(t)
_, repo, _, client := SetupConflictsService(ctx, t, true, nil)
ourCommitOid := "0b4bc9a49b562e85de7cc9e834518ea6828729b9"
theirCommitOid := "bb5206fee213d983da88c47f9cf4cc6caf9c66dc"
testCases := []struct {
desc string
request *gitalypb.ListConflictFilesRequest
code codes.Code
}{
{
desc: "empty repo",
request: &gitalypb.ListConflictFilesRequest{
Repository: nil,
OurCommitOid: ourCommitOid,
TheirCommitOid: theirCommitOid,
},
code: codes.InvalidArgument,
|
identifier_body
|
|
reconcile.go
|
crdHandle,
crdClient: crdClient,
resourceClients: resourceClients,
}
}
// Run starts the reconciliation loop and blocks until the context is done, or
// there is an unrecoverable error. Reconciliation actions are done at the
// supplied interval.
func (r *Reconciler) Run(ctx context.Context, interval time.Duration) error {
glog.V(4).Infof("Starting reconciler for %v.%v.%v", r.gvk.Group, r.gvk.Version, r.gvk.Kind)
go wait.Until(r.run, interval, ctx.Done())
<-ctx.Done()
return ctx.Err()
}
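// Self-contained sketch of the tick-until-context-done pattern that Run
// delegates to wait.Until: a plain time.Ticker replaces the
// k8s.io/apimachinery helper so the snippet needs only the standard library.
// (One difference: wait.Until also runs the function once immediately,
// while the ticker here waits one interval first.)
package main

import (
	"context"
	"fmt"
	"time"
)

func runUntilDone(ctx context.Context, interval time.Duration, pass func()) error {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err() // context.Canceled or context.DeadlineExceeded
		case <-ticker.C:
			pass()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()
	err := runUntilDone(ctx, 10*time.Millisecond, func() { fmt.Println("reconcile pass") })
	fmt.Println(err) // context deadline exceeded
}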
type subresource struct {
client resource.Client
object runtime.Object
lifecycle lifecycle
}
type subresources []*subresource
// Contains subresources grouped by their controlling resource.
type subresourceMap map[string]subresources
type action struct {
newCRState states.State
newCRReason string
subresourcesToCreate subresources
subresourcesToDelete subresources
}
func (a action) String() string {
var sCreateNames []string
for _, s := range a.subresourcesToCreate {
sCreateNames = append(sCreateNames, s.client.Plural())
}
var sDeleteNames []string
for _, s := range a.subresourcesToDelete {
sDeleteNames = append(sDeleteNames, s.client.Plural())
}
return fmt.Sprintf(
`{
newCRState: "%s",
newCRReason: "%s",
subresourcesToCreate: "%s",
subresourcesToDelete: "%s"
}`,
a.newCRState,
a.newCRReason,
strings.Join(sCreateNames, ", "),
strings.Join(sDeleteNames, ", "))
}
func (r *Reconciler) run() {
subresourcesByCR := r.groupSubresourcesByCustomResource()
for crName, subs := range subresourcesByCR {
a, cr, err := r.planAction(crName, subs)
if err != nil {
glog.Errorf(`failed to plan action for custom resource: [%s] subresources: [%v] error: [%s]`, crName, subresourcesByCR, err.Error())
continue
}
glog.Infof("planned action: %s", a.String())
errs := r.executeAction(crName, cr, a)
if len(errs) > 0 {
glog.Errorf(`failed to execute action for custom resource: [%s] subresources: %v errors: %v`, crName, subresourcesByCR, errs)
}
}
}
// TODO(CD): groupSubresourcesByCustomResource() doesn't work for a custom
// resource with no sub-resource(s) or whose sub-resource(s) have been deleted.
// As resourceClient.List() will not have any sub-resource belonging to the
// custom resource, result will not have the controller name as one of its
// keys.
//
// To fix the problem, we could do a List from the CR client and then iterate
// over those names instead of keys from the intermediate result map we built
// based on the subresources.
func (r *Reconciler) groupSubresourcesByCustomResource() subresourceMap {
result := subresourceMap{}
// Get the list of crs.
crListObj, err := r.crdClient.List(r.namespace, map[string]string{})
if err != nil || crListObj == nil {
glog.Warningf("[reconcile] could not list custom resources. Got error %v %v", err, crListObj)
return result
}
customResourceList := crListObj.(crd.CustomResourceList)
// Get the list of custom resources
crList := customResourceList.GetItems()
// Return if the list is empty
if len(crList) == 0 {
glog.Warningf("[reconcile] custom resources list is empty")
return result
}
for _, resourceClient := range r.resourceClients {
objects, err := resourceClient.List(r.namespace, map[string]string{})
if err != nil {
glog.Warningf(`[reconcile] failed to list "%s" subresources`, resourceClient.Plural())
continue
}
for _, obj := range objects {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef == nil {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as it doesn not have a controller reference", obj.GetName(), r.namespace)
continue
}
// Only manipulate controller-created subresources.
if controllerRef.APIVersion != r.gvk.GroupVersion().String() || controllerRef.Kind != r.gvk.Kind {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as controlling custom resource is from a different group, version and kind", obj.GetName(), r.namespace)
continue
}
subLifecycle := exists
objMeta, err := meta.Accessor(obj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for subresource: %v", err)
continue
}
if objMeta.GetDeletionTimestamp() != nil {
subLifecycle = deleting
}
runtimeObj, ok := obj.(runtime.Object)
if !ok {
glog.Warningf("[reconcile] error asserting metav1.Object as runtime.Object: %v", err)
continue
}
controllerName := controllerRef.Name
objList := result[controllerName]
result[controllerName] = append(objList, &subresource{resourceClient, runtimeObj, subLifecycle})
}
}
// Iterate over the CRs to get the list of missing sub-resources.
// ASSUMPTION: there is at most one subresource of each kind per
// custom resource, so we use the plural form as a key.
for _, item := range crList {
cr, ok := item.(crd.CustomResource)
if !ok {
glog.Warningf("[reconcile] failed to assert item %v to type CustomResource", item)
continue
}
subs, ok := result[cr.Name()]
if !ok {
glog.Warningf("[reconcile] no sub-resources found for cr %v", cr.Name())
}
// Find non-existing subresources based on the expected subresource clients.
existingSubs := map[string]struct{}{}
for _, sub := range subs {
existingSubs[sub.client.Plural()] = struct{}{}
}
for _, subClient := range r.resourceClients {
_, exists := existingSubs[subClient.Plural()]
if !exists {
result[cr.Name()] = append(subs, &subresource{subClient, nil, doesNotExist})
}
}
}
return result
}
func (subs subresources) filter(predicate func(s *subresource) bool) subresources
|
func (subs subresources) any(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) > 0
}
func (subs subresources) all(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) == len(subs)
}
func (r *Reconciler) planAction(controllerName string, subs subresources) (*action, crd.CustomResource, error) {
// If the controller name is empty, these are not our subresources;
// do nothing.
if controllerName == "" {
return &action{}, nil, nil
}
// Compute the current lifecycle phase of the custom resource.
customResourceLifecycle := exists
crObj, err := r.crdClient.Get(r.namespace, controllerName)
if err != nil && apierrors.IsNotFound(err) {
customResourceLifecycle = doesNotExist
}
crMeta, err := meta.Accessor(crObj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for controlling custom resource: %v", err)
} else if crMeta.GetDeletionTimestamp() != nil {
customResourceLifecycle = deleting
}
// If the custom resource is deleting or does not exist, clean up all
// subresources.
if customResourceLifecycle.isOneOf(doesNotExist, deleting) {
return &action{subresourcesToDelete: subs}, nil, nil
}
cr, ok := crObj.(crd.CustomResource)
if !ok {
return &action{}, nil, fmt.Errorf("object retrieved from CRD client not an instance of crd.CustomResource: [%v]", crObj)
}
customResourceSpecState := cr.GetSpecState()
customResourceStatusState := cr.GetStatusState()
// If the desired custom resource state is running or completed AND
// the custom resource is in a terminal state, then delete all subresources.
if customResourceSpecState.IsOneOf(states.Running, states.Completed) &&
customResourceStatusState.IsOneOf(states.Completed, states.Failed) {
return &action{subresourcesToDelete: subs}, nil, nil
}
// If the desired custom resource state is running or completed AND
// the current custom resource status is non-terminal, ANY non-ephemeral
// subresource that is failed, does not exist or has been deleted causes
// the custom resource current state to move to failed.
if customResourceSpecState.IsOneOf(states.Running, states
|
{
var result subresources
for _, sub := range subs {
if predicate(sub) {
result = append(result, sub)
}
}
return result
}
|
identifier_body
|
reconcile.go
|
crdHandle,
crdClient: crdClient,
resourceClients: resourceClients,
}
}
// Run starts the reconciliation loop and blocks until the context is done, or
// there is an unrecoverable error. Reconciliation actions are done at the
// supplied interval.
func (r *Reconciler) Run(ctx context.Context, interval time.Duration) error {
glog.V(4).Infof("Starting reconciler for %v.%v.%v", r.gvk.Group, r.gvk.Version, r.gvk.Kind)
go wait.Until(r.run, interval, ctx.Done())
<-ctx.Done()
return ctx.Err()
}
type subresource struct {
client resource.Client
object runtime.Object
lifecycle lifecycle
}
type subresources []*subresource
// Contains subresources grouped by their controlling resource.
type subresourceMap map[string]subresources
type action struct {
newCRState states.State
newCRReason string
subresourcesToCreate subresources
subresourcesToDelete subresources
}
func (a action) String() string {
var sCreateNames []string
for _, s := range a.subresourcesToCreate {
sCreateNames = append(sCreateNames, s.client.Plural())
}
var sDeleteNames []string
for _, s := range a.subresourcesToDelete {
sDeleteNames = append(sDeleteNames, s.client.Plural())
}
return fmt.Sprintf(
`{
newCRState: "%s",
newCRReason: "%s",
subresourcesToCreate: "%s",
subresourcesToDelete: "%s"
}`,
a.newCRState,
a.newCRReason,
strings.Join(sCreateNames, ", "),
strings.Join(sDeleteNames, ", "))
}
func (r *Reconciler) run() {
subresourcesByCR := r.groupSubresourcesByCustomResource()
for crName, subs := range subresourcesByCR {
a, cr, err := r.planAction(crName, subs)
if err != nil {
glog.Errorf(`failed to plan action for custom resource: [%s] subresources: [%v] error: [%s]`, crName, subresourcesByCR, err.Error())
continue
}
glog.Infof("planned action: %s", a.String())
errs := r.executeAction(crName, cr, a)
if len(errs) > 0 {
glog.Errorf(`failed to execute action for custom resource: [%s] subresources: %v errors: %v`, crName, subresourcesByCR, errs)
}
}
}
// TODO(CD): groupSubresourcesByCustomResource() doesn't work for a custom
// resource with no sub-resource(s) or whose sub-resource(s) have been deleted.
// As resourceClient.List() will not have any sub-resource belonging to the
// custom resource, result will not have the controller name as one of its
// keys.
//
// To fix the problem, we could do a List from the CR client and then iterate
// over those names instead of keys from the intermediate result map we built
// based on the subresources.
func (r *Reconciler) groupSubresourcesByCustomResource() subresourceMap {
result := subresourceMap{}
// Get the list of crs.
crListObj, err := r.crdClient.List(r.namespace, map[string]string{})
if err != nil || crListObj == nil {
glog.Warningf("[reconcile] could not list custom resources. Got error %v %v", err, crListObj)
return result
}
customResourceList := crListObj.(crd.CustomResourceList)
// Get the list of custom resources
crList := customResourceList.GetItems()
// Return if the list is empty
if len(crList) == 0 {
glog.Warningf("[reconcile] custom resources list is empty")
return result
}
for _, resourceClient := range r.resourceClients {
objects, err := resourceClient.List(r.namespace, map[string]string{})
if err != nil {
glog.Warningf(`[reconcile] failed to list "%s" subresources`, resourceClient.Plural())
continue
}
for _, obj := range objects {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef == nil {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as it doesn not have a controller reference", obj.GetName(), r.namespace)
continue
}
// Only manipulate controller-created subresources.
if controllerRef.APIVersion != r.gvk.GroupVersion().String() || controllerRef.Kind != r.gvk.Kind {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as controlling custom resource is from a different group, version and kind", obj.GetName(), r.namespace)
continue
}
subLifecycle := exists
objMeta, err := meta.Accessor(obj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for subresource: %v", err)
continue
}
if objMeta.GetDeletionTimestamp() != nil {
subLifecycle = deleting
}
runtimeObj, ok := obj.(runtime.Object)
if !ok {
glog.Warningf("[reconcile] error asserting metav1.Object as runtime.Object: %v", err)
continue
}
controllerName := controllerRef.Name
objList := result[controllerName]
result[controllerName] = append(objList, &subresource{resourceClient, runtimeObj, subLifecycle})
}
}
// Iterate over the CRs to get the list of missing sub-resources.
// ASSUMPTION: there is at most one subresource of each kind per
// custom resource, so we use the plural form as a key.
for _, item := range crList {
cr, ok := item.(crd.CustomResource)
if !ok {
glog.Warningf("[reconcile] failed to assert item %v to type CustomResource", item)
continue
}
subs, ok := result[cr.Name()]
if !ok {
glog.Warningf("[reconcile] no sub-resources found for cr %v", cr.Name())
}
// Find non-existing subresources based on the expected subresource clients.
existingSubs := map[string]struct{}{}
for _, sub := range subs {
existingSubs[sub.client.Plural()] = struct{}{}
}
for _, subClient := range r.resourceClients {
_, exists := existingSubs[subClient.Plural()]
if !exists {
result[cr.Name()] = append(subs, &subresource{subClient, nil, doesNotExist})
}
}
}
return result
}
func (subs subresources) filter(predicate func(s *subresource) bool) subresources {
var result subresources
for _, sub := range subs {
if predicate(sub) {
result = append(result, sub)
}
}
return result
}
func (subs subresources) any(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) > 0
}
func (subs subresources)
|
(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) == len(subs)
}
func (r *Reconciler) planAction(controllerName string, subs subresources) (*action, crd.CustomResource, error) {
// If the controller name is empty, these are not our subresources;
// do nothing.
if controllerName == "" {
return &action{}, nil, nil
}
// Compute the current lifecycle phase of the custom resource.
customResourceLifecycle := exists
crObj, err := r.crdClient.Get(r.namespace, controllerName)
if err != nil && apierrors.IsNotFound(err) {
customResourceLifecycle = doesNotExist
}
crMeta, err := meta.Accessor(crObj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for controlling custom resource: %v", err)
} else if crMeta.GetDeletionTimestamp() != nil {
customResourceLifecycle = deleting
}
// If the custom resource is deleting or does not exist, clean up all
// subresources.
if customResourceLifecycle.isOneOf(doesNotExist, deleting) {
return &action{subresourcesToDelete: subs}, nil, nil
}
cr, ok := crObj.(crd.CustomResource)
if !ok {
return &action{}, nil, fmt.Errorf("object retrieved from CRD client not an instance of crd.CustomResource: [%v]", crObj)
}
customResourceSpecState := cr.GetSpecState()
customResourceStatusState := cr.GetStatusState()
// If the desired custom resource state is running or completed AND
// the custom resource is in a terminal state, then delete all subresources.
if customResourceSpecState.IsOneOf(states.Running, states.Completed) &&
customResourceStatusState.IsOneOf(states.Completed, states.Failed) {
return &action{subresourcesToDelete: subs}, nil, nil
}
// If the desired custom resource state is running or completed AND
// the current custom resource status is non-terminal, ANY non-ephemeral
// subresource that is failed, does not exist or has been deleted causes
// the custom resource current state to move to failed.
if customResourceSpecState.IsOneOf(states.Running, states.Completed
|
all
|
identifier_name
|
reconcile.go
|
crdHandle,
crdClient: crdClient,
resourceClients: resourceClients,
}
}
// Run starts the reconciliation loop and blocks until the context is done, or
// there is an unrecoverable error. Reconciliation actions are done at the
// supplied interval.
func (r *Reconciler) Run(ctx context.Context, interval time.Duration) error {
glog.V(4).Infof("Starting reconciler for %v.%v.%v", r.gvk.Group, r.gvk.Version, r.gvk.Kind)
go wait.Until(r.run, interval, ctx.Done())
<-ctx.Done()
return ctx.Err()
}
type subresource struct {
client resource.Client
object runtime.Object
lifecycle lifecycle
}
type subresources []*subresource
// Contains subresources grouped by their controlling resource.
type subresourceMap map[string]subresources
type action struct {
newCRState states.State
newCRReason string
subresourcesToCreate subresources
subresourcesToDelete subresources
}
func (a action) String() string {
var sCreateNames []string
for _, s := range a.subresourcesToCreate {
sCreateNames = append(sCreateNames, s.client.Plural())
}
var sDeleteNames []string
for _, s := range a.subresourcesToDelete {
sDeleteNames = append(sDeleteNames, s.client.Plural())
}
return fmt.Sprintf(
`{
newCRState: "%s",
newCRReason: "%s",
subresourcesToCreate: "%s",
subresourcesToDelete: "%s"
}`,
a.newCRState,
a.newCRReason,
strings.Join(sCreateNames, ", "),
strings.Join(sDeleteNames, ", "))
}
func (r *Reconciler) run() {
subresourcesByCR := r.groupSubresourcesByCustomResource()
for crName, subs := range subresourcesByCR {
a, cr, err := r.planAction(crName, subs)
if err != nil {
glog.Errorf(`failed to plan action for custom resource: [%s] subresources: [%v] error: [%s]`, crName, subresourcesByCR, err.Error())
continue
}
glog.Infof("planned action: %s", a.String())
errs := r.executeAction(crName, cr, a)
if len(errs) > 0 {
glog.Errorf(`failed to execute action for custom resource: [%s] subresources: %v errors: %v`, crName, subresourcesByCR, errs)
}
}
}
// TODO(CD): groupSubresourcesByCustomResource() doesn't work for a custom
// resource with no sub-resource(s) or whose sub-resource(s) have been deleted.
// As resourceClient.List() will not have any sub-resource belonging to the
// custom resource, result will not have the controller name as one of its
// keys.
//
// To fix the problem, we could do a List from the CR client and then iterate
// over those names instead of keys from the intermediate result map we built
// based on the subresources.
func (r *Reconciler) groupSubresourcesByCustomResource() subresourceMap {
result := subresourceMap{}
// Get the list of crs.
crListObj, err := r.crdClient.List(r.namespace, map[string]string{})
if err != nil || crListObj == nil {
glog.Warningf("[reconcile] could not list custom resources. Got error %v %v", err, crListObj)
return result
}
customResourceList := crListObj.(crd.CustomResourceList)
// Get the list of custom resources
crList := customResourceList.GetItems()
// Return if the list is empty
if len(crList) == 0 {
glog.Warningf("[reconcile] custom resources list is empty")
return result
}
for _, resourceClient := range r.resourceClients {
objects, err := resourceClient.List(r.namespace, map[string]string{})
if err != nil {
glog.Warningf(`[reconcile] failed to list "%s" subresources`, resourceClient.Plural())
continue
}
for _, obj := range objects {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef == nil {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as it doesn not have a controller reference", obj.GetName(), r.namespace)
continue
}
// Only manipulate controller-created subresources.
if controllerRef.APIVersion != r.gvk.GroupVersion().String() || controllerRef.Kind != r.gvk.Kind {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as controlling custom resource is from a different group, version and kind", obj.GetName(), r.namespace)
continue
}
subLifecycle := exists
objMeta, err := meta.Accessor(obj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for subresource: %v", err)
continue
}
if objMeta.GetDeletionTimestamp() != nil {
subLifecycle = deleting
}
runtimeObj, ok := obj.(runtime.Object)
if !ok {
glog.Warningf("[reconcile] error asserting metav1.Object as runtime.Object: %v", err)
continue
}
controllerName := controllerRef.Name
objList := result[controllerName]
result[controllerName] = append(objList, &subresource{resourceClient, runtimeObj, subLifecycle})
}
}
// Iterate over the CRs to get the list of missing sub-resources.
// ASSUMPTION: there is at most one subresource of each kind per
// custom resource, so we use the plural form as a key.
for _, item := range crList {
cr, ok := item.(crd.CustomResource)
if !ok
|
subs, ok := result[cr.Name()]
if !ok {
glog.Warningf("[reconcile] no sub-resources found for cr %v", cr.Name())
}
// Find non-existing subresources based on the expected subresource clients.
existingSubs := map[string]struct{}{}
for _, sub := range subs {
existingSubs[sub.client.Plural()] = struct{}{}
}
for _, subClient := range r.resourceClients {
_, exists := existingSubs[subClient.Plural()]
if !exists {
result[cr.Name()] = append(subs, &subresource{subClient, nil, doesNotExist})
}
}
}
return result
}
func (subs subresources) filter(predicate func(s *subresource) bool) subresources {
var result subresources
for _, sub := range subs {
if predicate(sub) {
result = append(result, sub)
}
}
return result
}
func (subs subresources) any(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) > 0
}
func (subs subresources) all(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) == len(subs)
}
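// Self-contained sketch showing how the filter/any/all helpers compose in
// practice; the lifecycle constants and subresource shape are trimmed-down
// stand-ins for the types defined above.
package main

import "fmt"

type lifecycle int

const (
	exists lifecycle = iota
	deleting
	doesNotExist
)

type subresource struct{ lifecycle lifecycle }

type subresources []*subresource

func (subs subresources) filter(predicate func(s *subresource) bool) subresources {
	var result subresources
	for _, sub := range subs {
		if predicate(sub) {
			result = append(result, sub)
		}
	}
	return result
}

func (subs subresources) any(predicate func(s *subresource) bool) bool {
	return len(subs.filter(predicate)) > 0
}

func (subs subresources) all(predicate func(s *subresource) bool) bool {
	return len(subs.filter(predicate)) == len(subs)
}

func main() {
	subs := subresources{{deleting}, {exists}}
	isDeleting := func(s *subresource) bool { return s.lifecycle == deleting }
	fmt.Println(subs.any(isDeleting)) // true
	fmt.Println(subs.all(isDeleting)) // false
}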
func (r *Reconciler) planAction(controllerName string, subs subresources) (*action, crd.CustomResource, error) {
// If the controller name is empty, these are not our subresources;
// do nothing.
if controllerName == "" {
return &action{}, nil, nil
}
// Compute the current lifecycle phase of the custom resource.
customResourceLifecycle := exists
crObj, err := r.crdClient.Get(r.namespace, controllerName)
if err != nil && apierrors.IsNotFound(err) {
customResourceLifecycle = doesNotExist
}
crMeta, err := meta.Accessor(crObj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for controlling custom resource: %v", err)
} else if crMeta.GetDeletionTimestamp() != nil {
customResourceLifecycle = deleting
}
// If the custom resource is deleting or does not exist, clean up all
// subresources.
if customResourceLifecycle.isOneOf(doesNotExist, deleting) {
return &action{subresourcesToDelete: subs}, nil, nil
}
cr, ok := crObj.(crd.CustomResource)
if !ok {
return &action{}, nil, fmt.Errorf("object retrieved from CRD client not an instance of crd.CustomResource: [%v]", crObj)
}
customResourceSpecState := cr.GetSpecState()
customResourceStatusState := cr.GetStatusState()
// If the desired custom resource state is running or completed AND
// the custom resource is in a terminal state, then delete all subresources.
if customResourceSpecState.IsOneOf(states.Running, states.Completed) &&
customResourceStatusState.IsOneOf(states.Completed, states.Failed) {
return &action{subresourcesToDelete: subs}, nil, nil
}
// If the desired custom resource state is running or completed AND
// the current custom resource status is non-terminal, ANY non-ephemeral
// subresource that is failed, does not exist or has been deleted causes
// the custom resource current state to move to failed.
if customResourceSpecState.IsOneOf(states.Running, states
|
{
glog.Warningf("[reconcile] failed to assert item %v to type CustomResource", item)
continue
}
|
conditional_block
|
reconcile.go
|
crdHandle,
crdClient: crdClient,
resourceClients: resourceClients,
}
}
// Run starts the reconciliation loop and blocks until the context is done, or
// there is an unrecoverable error. Reconciliation actions are done at the
// supplied interval.
func (r *Reconciler) Run(ctx context.Context, interval time.Duration) error {
glog.V(4).Infof("Starting reconciler for %v.%v.%v", r.gvk.Group, r.gvk.Version, r.gvk.Kind)
go wait.Until(r.run, interval, ctx.Done())
<-ctx.Done()
return ctx.Err()
}
type subresource struct {
client resource.Client
object runtime.Object
lifecycle lifecycle
}
type subresources []*subresource
// Contains subresources grouped by their controlling resource.
type subresourceMap map[string]subresources
type action struct {
newCRState states.State
newCRReason string
subresourcesToCreate subresources
subresourcesToDelete subresources
}
func (a action) String() string {
var sCreateNames []string
for _, s := range a.subresourcesToCreate {
sCreateNames = append(sCreateNames, s.client.Plural())
}
var sDeleteNames []string
for _, s := range a.subresourcesToDelete {
sDeleteNames = append(sDeleteNames, s.client.Plural())
}
return fmt.Sprintf(
`{
newCRState: "%s",
newCRReason: "%s",
subresourcesToCreate: "%s",
subresourcesToDelete: "%s"
}`,
a.newCRState,
a.newCRReason,
strings.Join(sCreateNames, ", "),
strings.Join(sDeleteNames, ", "))
}
func (r *Reconciler) run() {
subresourcesByCR := r.groupSubresourcesByCustomResource()
for crName, subs := range subresourcesByCR {
a, cr, err := r.planAction(crName, subs)
if err != nil {
glog.Errorf(`failed to plan action for custom resource: [%s] subresources: [%v] error: [%s]`, crName, subresourcesByCR, err.Error())
continue
}
glog.Infof("planned action: %s", a.String())
errs := r.executeAction(crName, cr, a)
if len(errs) > 0 {
glog.Errorf(`failed to execute action for custom resource: [%s] subresources: %v errors: %v`, crName, subresourcesByCR, errs)
}
}
}
// TODO(CD): groupSubresourcesByCustomResource() doesn't work for a custom
// resource with no sub-resource(s) or whose sub-resource(s) have been deleted.
// As resourceClient.List() will not have any sub-resource belonging to the
|
// To fix the problem, we could do a List from the CR client and then iterate
// over those names instead of keys from the intermediate result map we built
// based on the subresources.
func (r *Reconciler) groupSubresourcesByCustomResource() subresourceMap {
result := subresourceMap{}
// Get the list of crs.
crListObj, err := r.crdClient.List(r.namespace, map[string]string{})
if err != nil || crListObj == nil {
glog.Warningf("[reconcile] could not list custom resources. Got error %v %v", err, crListObj)
return result
}
customResourceList := crListObj.(crd.CustomResourceList)
// Get the list of custom resources
crList := customResourceList.GetItems()
// Return if the list is empty
if len(crList) == 0 {
glog.Warningf("[reconcile] custom resources list is empty")
return result
}
for _, resourceClient := range r.resourceClients {
objects, err := resourceClient.List(r.namespace, map[string]string{})
if err != nil {
glog.Warningf(`[reconcile] failed to list "%s" subresources`, resourceClient.Plural())
continue
}
for _, obj := range objects {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef == nil {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as it doesn not have a controller reference", obj.GetName(), r.namespace)
continue
}
// Only manipulate controller-created subresources.
if controllerRef.APIVersion != r.gvk.GroupVersion().String() || controllerRef.Kind != r.gvk.Kind {
glog.V(4).Infof("[reconcile] ignoring sub-resource %v, %v as controlling custom resource is from a different group, version and kind", obj.GetName(), r.namespace)
continue
}
subLifecycle := exists
objMeta, err := meta.Accessor(obj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for subresource: %v", err)
continue
}
if objMeta.GetDeletionTimestamp() != nil {
subLifecycle = deleting
}
runtimeObj, ok := obj.(runtime.Object)
if !ok {
glog.Warningf("[reconcile] error asserting metav1.Object as runtime.Object: %v", err)
continue
}
controllerName := controllerRef.Name
objList := result[controllerName]
result[controllerName] = append(objList, &subresource{resourceClient, runtimeObj, subLifecycle})
}
}
// Iterate over the CRs to get the list of missing sub-resources.
// ASSUMPTION: there is at most one subresource of each kind per
// custom resource, so we use the plural form as a key.
for _, item := range crList {
cr, ok := item.(crd.CustomResource)
if !ok {
glog.Warningf("[reconcile] failed to assert item %v to type CustomResource", item)
continue
}
subs, ok := result[cr.Name()]
if !ok {
glog.Warningf("[reconcile] no sub-resources found for cr %v", cr.Name())
}
// Find non-existing subresources based on the expected subresource clients.
existingSubs := map[string]struct{}{}
for _, sub := range subs {
existingSubs[sub.client.Plural()] = struct{}{}
}
for _, subClient := range r.resourceClients {
_, exists := existingSubs[subClient.Plural()]
if !exists {
result[cr.Name()] = append(subs, &subresource{subClient, nil, doesNotExist})
}
}
}
return result
}
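// Illustrative sketch (an assumption, not from the source file) of the
// missing-subresource detection at the end of
// groupSubresourcesByCustomResource: collect the observed plurals into a
// set, then report every expected plural that is absent.
package main

import "fmt"

func missingPlurals(expected, observed []string) []string {
	seen := make(map[string]struct{}, len(observed))
	for _, p := range observed {
		seen[p] = struct{}{}
	}
	var missing []string
	for _, p := range expected {
		if _, ok := seen[p]; !ok {
			missing = append(missing, p)
		}
	}
	return missing
}

func main() {
	expected := []string{"deployments", "services", "configmaps"}
	observed := []string{"services"}
	fmt.Println(missingPlurals(expected, observed)) // [deployments configmaps]
}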
func (subs subresources) filter(predicate func(s *subresource) bool) subresources {
var result subresources
for _, sub := range subs {
if predicate(sub) {
result = append(result, sub)
}
}
return result
}
func (subs subresources) any(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) > 0
}
func (subs subresources) all(predicate func(s *subresource) bool) bool {
return len(subs.filter(predicate)) == len(subs)
}
func (r *Reconciler) planAction(controllerName string, subs subresources) (*action, crd.CustomResource, error) {
// If the controller name is empty, these are not our subresources;
// do nothing.
if controllerName == "" {
return &action{}, nil, nil
}
// Compute the current lifecycle phase of the custom resource.
customResourceLifecycle := exists
crObj, err := r.crdClient.Get(r.namespace, controllerName)
if err != nil && apierrors.IsNotFound(err) {
customResourceLifecycle = doesNotExist
}
crMeta, err := meta.Accessor(crObj)
if err != nil {
glog.Warningf("[reconcile] error getting meta accessor for controlling custom resource: %v", err)
} else if crMeta.GetDeletionTimestamp() != nil {
customResourceLifecycle = deleting
}
// If the custom resource is deleting or does not exist, clean up all
// subresources.
if customResourceLifecycle.isOneOf(doesNotExist, deleting) {
return &action{subresourcesToDelete: subs}, nil, nil
}
cr, ok := crObj.(crd.CustomResource)
if !ok {
return &action{}, nil, fmt.Errorf("object retrieved from CRD client not an instance of crd.CustomResource: [%v]", crObj)
}
customResourceSpecState := cr.GetSpecState()
customResourceStatusState := cr.GetStatusState()
// If the desired custom resource state is running or completed AND
// the custom resource is in a terminal state, then delete all subresources.
if customResourceSpecState.IsOneOf(states.Running, states.Completed) &&
customResourceStatusState.IsOneOf(states.Completed, states.Failed) {
return &action{subresourcesToDelete: subs}, nil, nil
}
// If the desired custom resource state is running or completed AND
// the current custom resource status is non-terminal, ANY non-ephemeral
// subresource that is failed, does not exist or has been deleted causes
// the custom resource current state to move to failed.
if customResourceSpecState.IsOneOf(states.Running, states.Completed)
|
// custom resource, result will not have the controller name as one of its
// keys.
//
|
random_line_split
|
GPR_stan2.py
|
#import pandas as pd
#num_groups = 1
#group_no = 0
#if len(sys.argv) > 1:
# num_groups = int(sys.argv[1])
#if len(sys.argv) > 2:
# group_no = int(sys.argv[2])
star = sys.argv[1]
peak_no = int(sys.argv[2])
peak_no_str = ""
if peak_no > 0:
peak_no_str = str(peak_no) + "/"
num_iters = 50
num_chains = 4
down_sample_factor = 1
if len(sys.argv) > 3:
num_iters = int(sys.argv[3])
if len(sys.argv) > 4:
num_chains = int(sys.argv[4])
if len(sys.argv) > 5:
down_sample_factor = int(sys.argv[5])
dynamic_downsample = False
n_jobs = num_chains
n_tries = 1
downsample_iters = 1
print star, peak_no, num_iters, num_chains, down_sample_factor
data_dir = "../GP_input"
if data_dir == "../cleaned":
skiprows = 1
else:
skiprows = 0
files = []
data_found = False
for root, dirs, dir_files in os.walk(data_dir):
for file in dir_files:
if file[-4:] == ".dat":
file_star = file[:-4]
file_star = file_star.upper()
if (file_star[-3:] == '.CL'):
file_star = file_star[0:-3]
if (file_star[0:2] == 'HD'):
file_star = file_star[2:]
while len(file_star) > 0 and file_star[0] == '0': # remove leading zeros
file_star = file_star[1:]
if star == file_star:
dat = np.loadtxt(data_dir+"/"+file, usecols=(0,1), skiprows=skiprows)
data_found = True
break
if not data_found:
print "Cannot find data for " + star
sys.exit(1)
offset = 1979.3452
model = pickle.load(open('model.pkl', 'rb'))
model_null = pickle.load(open('model_null.pkl', 'rb'))
t_orig = dat[:,0]
y_orig = dat[:,1]
n_orig = len(t_orig)
if dynamic_downsample:
down_sample_factor = max(1, n_orig / 500)
downsample_iters = down_sample_factor
for downsample_iter in np.arange(0, downsample_iters):
if downsample_iters > 1:
downsample_iter_str = '_' + str(downsample_iter)
else:
downsample_iter_str = ''
if down_sample_factor >= 2:
#indices = np.random.choice(len(t), len(t)/down_sample_factor, replace=False, p=None)
#indices = np.sort(indices)
#t = t[indices]
#y = y[indices]
# Copy the strided views: the in-place updates below (t /= 365.25 etc.)
# would otherwise write through to t_orig and corrupt later iterations.
t = t_orig[downsample_iter::down_sample_factor].copy()
y = y_orig[downsample_iter::down_sample_factor].copy()
else:
t = t_orig.copy()
y = y_orig.copy()
#(t, y, noise_var_prop) = mw_utils.daily_averages(t, y, mw_utils.get_seasonal_noise_var(t/365.25, y))
#noise_var_prop = mw_utils.get_seasonal_noise_var(t/365.25, y)
#np.savetxt("GPR_stan/" + star + ".dat", np.column_stack((t_daily, y_daily)), fmt='%f')
t /= 365.25
t += offset
seasonal_noise = mw_utils.get_seasonal_noise_var(t, y, per_point=False)
noise_var_prop = mw_utils.get_seasonal_noise_var(t, y)
seasonal_means_var = np.var(mw_utils.get_seasonal_means(t, y)[:,1])
n = len(t)
print "Downsample factor", float(n_orig)/n
duration = max(t) - min(t)
orig_mean = np.mean(y)
#y -= orig_mean
orig_std = np.std(y)
n = len(t)
t -= np.mean(t)
t, y, noise_var_prop = mw_utils.downsample(t, y, noise_var_prop, 15.0/365.25)
n = len(t)
var = np.var(y)
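
Note: the strided slice t_orig[k::factor] used above keeps every factor-th sample starting at offset k, so successive downsample_iter values select disjoint subsets of the original series. A quick self-contained illustration:

import numpy as np

t_demo = np.arange(10)
factor = 3
for k in range(factor):
    # Offset k keeps samples k, k+factor, k+2*factor, ...
    print(list(t_demo[k::factor]))
# [0, 3, 6, 9]
# [1, 4, 7]
# [2, 5, 8]
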
###########################################################################
# Quasiperiodic model
prior_freq_mean = 0.0
prior_freq_std = 0.167
print "prior_freq_mean, prior_freq_std: ", prior_freq_mean, prior_freq_std
initial_param_values = []
for i in np.arange(0, num_chains):
#initial_freq = np.random.uniform(0.25*i/num_chains,0.25*(i+1)/num_chains)
initial_freq = 0.5*float(i+0.5)/num_chains#np.random.uniform(0, 0.5)
#initial_freq = max(0, np.random.normal(prior_freq_mean, prior_freq_std))
initial_m = orig_mean
initial_trend_var = var / duration
#initial_inv_length_scale = 0.0001#abs(np.random.normal(0, prior_freq_mean))
#initial_param_values.append(dict(freq=initial_freq, trend_var=initial_trend_var, m=initial_m, noise_var=initial_noise_var, inv_lengh_scale=initial_inv_length_scale))
initial_param_values.append(dict(freq=initial_freq, trend_var=initial_trend_var, m=initial_m))
fit = model.sampling(data=dict(x=t,N=n,y=y,noise_var=noise_var_prop, var_y=var,
var_seasonal_means=seasonal_means_var, prior_freq_mean=prior_freq_mean, prior_freq_std=prior_freq_std),
init=initial_param_values,
iter=num_iters, chains=num_chains, n_jobs=n_jobs)
with open("results/"+peak_no_str+star + downsample_iter_str + "_results.txt", "w") as output:
output.write(str(fit))
fit.plot()
plt.savefig("results/"+peak_no_str+star + downsample_iter_str + "_results.png")
plt.close()
results = fit.extract()
loglik_samples = results['lp__']
loglik = np.mean(loglik_samples)
length_scale_samples = results['length_scale']
(length_scale, length_scale_se) = mw_utils.mean_with_se(length_scale_samples)
length_scale2_samples = results['length_scale2']
(length_scale2, length_scale2_se) = mw_utils.mean_with_se(length_scale2_samples)
sig_var_samples = results['sig_var']
sig_var = np.mean(sig_var_samples)
sig_var2_samples = results['sig_var2']
sig_var2 = np.mean(sig_var2_samples)
m_samples = results['m']
m = np.mean(m_samples)
trend_var_samples = results['trend_var']
(trend_var, trend_var_se) = mw_utils.mean_with_se(trend_var_samples)
###########################################################################
# Find optimum freq 1
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)
fig.set_size_inches(18, 12)
freq_samples = results['freq']
freq_freqs = gaussian_kde(freq_samples)
freqs = np.linspace(min(freq_samples), max(freq_samples), 1000)
(freq, freq_se) = mw_utils.mode_with_se(freq_samples)
local_maxima_inds = mw_utils.find_local_maxima(freq_freqs(freqs))
freq_kmeans = KMeans(n_clusters=len(local_maxima_inds)).fit(freq_samples.reshape((-1, 1)))
opt_freq_label = freq_kmeans.predict(np.array([freq]).reshape((-1, 1)))
freq_samples_ = np.sort(freq_samples[np.where(freq_kmeans.labels_ == opt_freq_label)])
inds = np.searchsorted(freqs, freq_samples_)
freqs_ = freqs[inds]
ax1.plot(freqs, freq_freqs(freqs), "b-", freqs_, freq_freqs(freqs_), 'k--')
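
For reference, a minimal self-contained sketch of the mode-finding recipe above: estimate the posterior density with a Gaussian KDE, take its mode, then use KMeans to isolate the samples belonging to the mode's cluster. The toy bimodal samples and the simple interior-maxima count stand in for the real posterior draws and mw_utils.mode_with_se / find_local_maxima:

import numpy as np
from scipy.stats import gaussian_kde
from sklearn.cluster import KMeans

# Bimodal toy posterior standing in for the freq samples.
rng = np.random.RandomState(0)
samples = np.concatenate([rng.normal(0.1, 0.01, 500),
                          rng.normal(0.3, 0.02, 200)])

kde = gaussian_kde(samples)
grid = np.linspace(samples.min(), samples.max(), 1000)
dens = kde(grid)
mode = grid[np.argmax(dens)]  # global mode of the KDE

# Count interior local maxima of the density to pick the cluster count.
n_modes = max(1, int(((dens[1:-1] > dens[:-2]) & (dens[1:-1] > dens[2:])).sum()))

km = KMeans(n_clusters=n_modes, n_init=10).fit(samples.reshape(-1, 1))
mode_label = km.predict(np.array([[mode]]))[0]
mode_samples = samples[km.labels_ == mode_label]  # draws in the mode's cluster
print((mode, mode_samples.mean(), mode_samples.std()))
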
###########################################################################
freq2_samples = results['freq2']
freq2_freqs = gaussian_kde(freq2_samples)
freqs2 = np.linspace(min(freq2_samples), max(freq2_samples), 1000)
(freq2, freq2_se) = mw_utils.mode_with_se(freq2_samples)
local_maxima_inds = mw_utils.find_local_maxima(freq2_freqs(freqs2))
freq2_kmeans = KMeans(n_clusters=len(local_maxima_inds)).fit(freq2_samples.reshape((-1, 1)))
opt_freq2_label = freq2_kmeans.predict(np.array([freq2]).reshape((-1, 1)))
freq2_samples_ = np.sort(freq2_samples[np.where(freq2_kmeans.labels_ == opt_freq2_label)])
inds = np.searchsorted(freqs2, freq2_samples_)
freqs2_ = freqs2[inds]
ax1.plot(freqs2, freq2_freqs(freqs2), "g-", freqs2_, freq2_freqs(freqs2_), 'k--')
###########################################################################
print "var=", var
print "sig_var=", sig
|
from scipy.stats import gaussian_kde
from sklearn.cluster import KMeans
|
random_line_split
|
|
GPR_stan2.py
|
for downsample_iter in np.arange(0, downsample_iters):
if downsample_iters > 1:
downsample_iter_str = '_' + str(downsample_iter)
else:
downsample_iter_str = ''
if down_sample_factor >= 2:
#indices = np.random.choice(len(t), len(t)/down_sample_factor, replace=False, p=None)
#indices = np.sort(indices)
#t = t[indices]
#y = y[indices]
t = t_orig[downsample_iter::down_sample_factor]
y = y_orig[downsample_iter::down_sample_factor]
else:
t = t_orig
y = y_orig
#(t, y, noise_var_prop) = mw_utils.daily_averages(t, y, mw_utils.get_seasonal_noise_var(t/365.25, y))
#noise_var_prop = mw_utils.get_seasonal_noise_var(t/365.25, y)
#np.savetxt("GPR_stan/" + star + ".dat", np.column_stack((t_daily, y_daily)), fmt='%f')
t /= 365.25
t += offset
seasonal_noise = mw_utils.get_seasonal_noise_var(t, y, per_point=False)
noise_var_prop = mw_utils.get_seasonal_noise_var(t, y)
seasonal_means_var =np.var(mw_utils.get_seasonal_means(t, y)[:,1])
n = len(t)
print "Downsample factor", float(n_orig)/n
duration = max(t) - min(t)
orig_mean = np.mean(y)
#y -= orig_mean
orig_std = np.std(y)
n = len(t)
t -= np.mean(t)
t, y, noise_var_prop = mw_utils.downsample(t, y, noise_var_prop, 15.0/365.25)
n = len(t)
var = np.var(y)
###########################################################################
# Quasiperiodic model
prior_freq_mean = 0.0
prior_freq_std = 0.167
print "prior_freq_mean, prior_freq_std: ", prior_freq_mean, prior_freq_std
initial_param_values = []
for i in np.arange(0, num_chains):
#initial_freq = np.random.uniform(0.25*i/num_chains,0.25*(i+1)/num_chains)
initial_freq = 0.5*float(i+0.5)/num_chains#np.random.uniform(0, 0.5)
#initial_freq = max(0, np.random.normal(prior_freq_mean, prior_freq_std))
initial_m = orig_mean
initial_trend_var = var / duration
#initial_inv_length_scale = 0.0001#abs(np.random.normal(0, prior_freq_mean))
#initial_param_values.append(dict(freq=initial_freq, trend_var=initial_trend_var, m=initial_m, noise_var=initial_noise_var, inv_lengh_scale=initial_inv_length_scale))
initial_param_values.append(dict(freq=initial_freq, trend_var=initial_trend_var, m=initial_m))
fit = model.sampling(data=dict(x=t,N=n,y=y,noise_var=noise_var_prop, var_y=var,
var_seasonal_means=seasonal_means_var, prior_freq_mean=prior_freq_mean, prior_freq_std=prior_freq_std),
init=initial_param_values,
iter=num_iters, chains=num_chains, n_jobs=n_jobs)
with open("results/"+peak_no_str+star + downsample_iter_str + "_results.txt", "w") as output:
output.write(str(fit))
fit.plot()
plt.savefig("results/"+peak_no_str+star + downsample_iter_str + "_results.png")
plt.close()
results = fit.extract()
loglik_samples = results['lp__']
loglik = np.mean(loglik_samples)
length_scale_samples = results['length_scale'];
(length_scale, length_scale_se) = mw_utils.mean_with_se(length_scale_samples)
length_scale2_samples = results['length_scale2'];
(length_scale2, length_scale2_se) = mw_utils.mean_with_se(length_scale2_samples)
sig_var_samples = results['sig_var']
sig_var = np.mean(sig_var_samples)
sig_var2_samples = results['sig_var2']
sig_var2 = np.mean(sig_var2_samples)
m_samples = results['m'];
m = np.mean(m_samples)
trend_var_samples = results['trend_var'];
(trend_var, trend_var_se) = mw_utils.mean_with_se(trend_var_samples)
###########################################################################
# Find optimum freq 1
fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1)
fig.set_size_inches(18, 12)
freq_samples = results['freq'];
freq_freqs = gaussian_kde(freq_samples)
freqs = np.linspace(min(freq_samples), max(freq_samples), 1000)
(freq, freq_se) = mw_utils.mode_with_se(freq_samples)
local_maxima_inds = mw_utils.find_local_maxima(freq_freqs(freqs))
freq_kmeans = KMeans(n_clusters=len(local_maxima_inds)).fit(freq_samples.reshape((-1, 1)))
opt_freq_label = freq_kmeans.predict(np.array([freq]).reshape((-1, 1)))
freq_samples_ = np.sort(freq_samples[np.where(freq_kmeans.labels_ == opt_freq_label)])
inds = np.searchsorted(freqs, freq_samples_)
freqs_ = freqs[inds]
ax1.plot(freqs, freq_freqs(freqs), "b-", freqs_, freq_freqs(freqs_), 'k--')
###########################################################################
freq2_samples = results['freq2'];
freq2_freqs = gaussian_kde(freq2_samples)
freqs2 = np.linspace(min(freq2_samples), max(freq2_samples), 1000)
(freq2, freq2_se) = mw_utils.mode_with_se(freq2_samples)
local_maxima_inds = mw_utils.find_local_maxima(freq2_freqs(freqs2))
freq2_kmeans = KMeans(n_clusters=len(local_maxima_inds)).fit(freq2_samples.reshape((-1, 1)))
opt_freq2_label = freq_kmeans.predict(np.array([freq2]).reshape((-1, 1)))
freq2_samples_ = np.sort(freq_samples[np.where(freq_kmeans.labels_ == opt_freq_label)])
inds = np.searchsorted(freqs2, freq2_samples_)
freqs2_ = freqs2[inds]
ax1.plot(freqs2, freq2_freqs(freqs2), "g-", freqs2_, freq2_freqs(freqs2_), 'k--')
###########################################################################
print "var=", var
print "sig_var=", sig_var
print "sig_var2=", sig_var2
print "length_scale", length_scale
print "length_scale2", length_scale2
print "freq, freq_se", freq, freq_se
print "freq2, freq2_se", freq2, freq2_se
print "trend_var", trend_var
print "m", m
gpr_gp = GPR_QP2.GPR_QP2(sig_vars=[sig_var, sig_var2], length_scales=[length_scale, length_scale2], freqs=[freq, freq2], noise_var=noise_var_prop, trend_var=trend_var, c=0.0)
t_test = np.linspace(min(t), max(t), 500)
gpr_gp.init(t, y-m)
(f_mean, pred_var, loglik) = gpr_gp.fit(t_test)
(f_t, _, _) = gpr_gp.fit(t)
f_mean += m
fvu = np.sum((f_t + m - y)**2) / n / var
print "FVU", fvu
print "loglik", loglik #(loglik + 0.5 * n * np.log(2.0 * np.pi))
###########################################################################
#Squared-exponential GP for model comparison
initial_param_values = []
for i in np.arange(0, num_chains):
initial_m = orig_mean
initial_trend_var = var / duration
initial_param_values.append(dict(trend_var=initial_trend_var, m=initial_m))
fit_null = model_null.sampling(data=dict(x=t,N=n,y=y,noise_var=noise_var_prop, var_y=var,
var_seasonal_means=seasonal_means_var, prior_freq_mean=prior_freq_mean, prior_freq_std=prior_freq_std),
init=initial_param_values,
iter=num_iters, chains=num_chains, n_jobs=n_jobs)
with open("results/"+peak_no_str+star + downsample_iter_str + "_results_null.txt", "w") as output:
output.write(str(fit_null))
fit_null.plot()
plt.savefig("results/"+peak_no_str+star + downsample_iter_str + "_results_null.png")
plt.close()
results_null = fit_null.extract()
loglik_samples_null = results_null['lp__']
loglik_null = np.mean(loglik_samples_null)
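
With both fits available, the quasiperiodic and squared-exponential models can be compared through their mean log posterior densities (lp__); a positive difference favors the quasiperiodic model. This is a rough heuristic sketch, not a formal Bayes factor or WAIC/LOO score:

import numpy as np

def compare_logliks(loglik_samples_alt, loglik_samples_null):
    # Positive values favor the alternative (quasiperiodic) model.
    return np.mean(loglik_samples_alt) - np.mean(loglik_samples_null)

# Example with synthetic lp__ draws:
rng = np.random.RandomState(1)
delta = compare_logliks(rng.normal(-100, 2, 1000), rng.normal(-110, 2, 1000))
print(delta)  # ~10: the alternative model is preferred
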
index.js
// Imports reconstructed for completeness; the Portfolio component path is an assumption.
import Head from 'next/head'
import Link from 'next/link'
import Portfolio from '../components/Portfolio'

export default function Home() {
return (
<div>
<Head>
<title>PublicTrades</title>
<link rel="icon" href="/favicon.ico" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/gsap/3.6.0/gsap.min.js"></script>
</Head>
<main>
<>
{/* This example requires Tailwind CSS v2.0+ */}
<div className="relative bg-white overflow-hidden">
<div className="max-w-7xl mx-auto">
<div className="relative z-10 pb-8 bg-white sm:pb-16 md:pb-20 lg:max-w-2xl lg:w-full lg:pb-28 xl:pb-32">
<svg className="hidden lg:block absolute right-0 inset-y-0 h-full w-48 text-white transform translate-x-1/2" fill="currentColor" viewBox="0 0 100 100" preserveAspectRatio="none" aria-hidden="true">
<polygon points="50,0 100,0 50,100 0,100" />
</svg>
<div className="relative pt-6 px-4 sm:px-6 lg:px-8">
<nav className="relative flex items-center justify-between sm:h-10 lg:justify-start" aria-label="Global">
<div className="flex items-center flex-grow flex-shrink-0 lg:flex-grow-0">
<div className="flex items-center justify-between w-full md:w-auto">
<a href="#">
{/* <span className="sr-only">Workflow</span> */}
<img className="h-8 w-auto sm:h-10" src="images/logo.png"/>
</a>
<div className="-mr-2 flex items-center md:hidden">
<button type="button" className="bg-white rounded-md p-2 inline-flex items-center justify-center text-gray-400 hover:text-gray-500 hover:bg-gray-100 focus:outline-none focus:ring-2 focus:ring-inset focus:ring-indigo-500" id="main-menu" aria-haspopup="true">
<span className="sr-only">Open main menu</span>
<svg className="h-6 w-6" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 6h16M4 12h16M4 18h16" />
</svg>
</button>
</div>
</div>
</div>
<div className="hidden md:block md:ml-10 md:pr-4 md:space-x-8">
{/* <a href="#" className="font-medium text-gray-500 hover:text-gray-900">Services</a>
<a href="#" className="font-medium text-gray-500 hover:text-gray-900">Products</a> */}
<Link href="login">
<a className="font-medium text-gray-500 hover:text-gray-900">Developers Login</a>
</Link>
</div>
</nav>
</div>
{/*
Mobile menu, show/hide based on menu open state.
Entering: "duration-150 ease-out"
From: "opacity-0 scale-95"
To: "opacity-100 scale-100"
Leaving: "duration-100 ease-in"
From: "opacity-100 scale-100"
To: "opacity-0 scale-95"
*/}
<div className="absolute top-0 inset-x-0 p-2 transition transform origin-top-right md:hidden">
<div className="rounded-lg shadow-md bg-white ring-1 ring-black ring-opacity-5 overflow-hidden">
{/* <div role="menu" aria-orientation="vertical" aria-labelledby="main-menu">
<div className="px-2 pt-2 pb-3 space-y-1" role="none">
<a href="#" className="block px-3 py-2 rounded-md text-base font-medium text-gray-700 hover:text-gray-900 hover:bg-gray-50" role="menuitem">Services</a>
<a href="#" className="block px-3 py-2 rounded-md text-base font-medium text-gray-700 hover:text-gray-900 hover:bg-gray-50" role="menuitem">Products</a>
<a href="#" className="block px-3 py-2 rounded-md text-base font-medium text-gray-700 hover:text-gray-900 hover:bg-gray-50" role="menuitem">Developers</a>
</div>
<div role="none">
</div>
</div> */}
</div>
</div>
<main className="mt-10 mx-auto max-w-7xl px-4 sm:mt-12 sm:px-6 md:mt-16 lg:mt-20 lg:px-8 xl:mt-28">
<div className="sm:text-center lg:text-left">
<h1 className="text-4xl tracking-tight font-extrabold text-gray-900 sm:text-5xl md:text-6xl">
<span className="block xl:inline">Developing overlooked ideas into</span>
<span className="block text-red-500 xl:inline">Usable products</span>
</h1>
<p className="mt-3 text-center text-gray-500 sm:mt-5 sm:text-lg sm:max-w-xl sm:mx-auto md:mt-5 md:text-xl lg:mx-0">
Ever had a need for an app service that doesn't exist and wished someone had it built already - We do too.
</p>
<div className="mt-5 sm:mt-8 sm:flex sm:justify-center lg:justify-start">
<div className="rounded-md shadow">
<a href="#" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base font-medium rounded-md text-white bg-red-700 hover:bg-red-400 md:py-4 md:text-lg md:px-10">
Test an idea
</a>
</div>
<div className="mt-3 sm:mt-0 sm:ml-3">
<a href="#" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base font-medium rounded-md text-white bg-green-700 hover:bg-green-400 md:py-4 md:text-lg md:px-10">
<span>Let's build together</span>
</a>
</div>
</div>
</div>
</main>
</div>
</div>
<div className="lg:absolute lg:inset-y-0 lg:right-0 lg:w-1/2">
<img className="h-56 w-full object-cover sm:h-72 md:h-96 lg:w-full lg:h-full" src="images/lastbulb2.jpg" alt="hero-image" />
</div>
</div>
<div className="relative bg-gray-50 pt-16 pb-20 px-4 sm:px-6 lg:pt-24 lg:pb-28 lg:px-8">
<div className="absolute inset-0">
<div className="bg-white h-1/3 sm:h-2/3" />
</div>
<div className="relative max-w-7xl mx-auto">
<div className="text-center">
<h2 className="text-3xl tracking-tight font-extrabold text-gray-900 sm:text-4xl">
Portfolio
</h2>
<p className="mt-3 mb-4 max-w-2xl mx-auto text-xl text-gray-500 sm:mt-4">
Growing list of web and mobile apps that once were just ideas, and now launched into actual product/service apps.
</p>
</div>
{/* Starts the Mid section */}
<div className="portfoliolist grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-4 justify-center items-center">
<Portfolio />
</div>
</div>
</div>
<footer className="bg-gray-800" aria-labelledby="footerHeading">
<h2 id="footerHeading" className="sr-only">Footer</h2>
<div className="max-w-7xl mx-auto py-12 px-4 sm:px-6 lg:py-16 lg:px-8">
<div className="xl:grid xl:grid-cols-3 xl:gap-8">
<div className="grid grid-cols-2 gap-8 xl:col-span-2">
<div className="md:grid md:grid-cols-2 md:gap-8">
<div>
<h3 className="text-sm font-semibold text-gray-400 tracking-wider uppercase">
Solutions
</h3>
<ul className="mt-4 space-y-4">
<li>
<a href="#" className="text-base text-gray-300 hover:text-white">
Marketing
</a>
</li>
<li>
<a href="#" className="text-base text-gray-300 hover:text-white">
Analytics
</a>
</li>
<li>
<a href="#" className="text-base text-gray-300 hover:text-white">
Commerce
</a>
</li>
<li>
<a href="#" className="text-base text-gray-300 hover:text-white">
Insights
</a>
</li>
</ul>
</div>
<div className="mt-12 md:mt-0">
<h3 className="text-sm font-semibold text-gray-400 tracking-wider uppercase">
Support
</h3>
<ul className="mt-4