file_name | prefix | suffix | middle | fim_type
---|---|---|---|---|
large_string (lengths 4–140) | large_string (lengths 0–12.1k) | large_string (lengths 0–12k) | large_string (lengths 0–7.51k) | large_string (4 class values)
MessageSigner.go
|
ature(dmessage, object, signer.GetPublicKey)
return isEncrypted, isSigned, err
}
// SignMessages returns whether messages MUST be signed on sending or receiving
func (signer *MessageSigner) SignMessages() bool {
return signer.signMessages
}
// VerifySignedMessage parses and verifies the message signature.
// As per the standard, the sender and signer of the message is given in the message's 'Sender' field.
// If the Sender field is missing, then the 'address' field contains the publisher.
func (signer *MessageSigner) VerifySignedMessage(rawMessage string, object interface{}) (isSigned bool, err error) {
isSigned, err = VerifySenderJWSSignature(rawMessage, object, signer.GetPublicKey)
return isSigned, err
}
// PublishObject encapsulates the message object in a payload, signs the message, and sends it.
// If an encryption key is provided then the signed message will be encrypted.
// The object to publish will be marshalled to JSON and signed by this publisher
func (signer *MessageSigner) PublishObject(address string, retained bool, object interface{}, encryptionKey *ecdsa.PublicKey) error {
// payload, err := json.Marshal(object)
payload, err := json.MarshalIndent(object, " ", " ")
if err != nil || object == nil {
errText := fmt.Sprintf("Publisher.publishMessage: Error marshalling message for address %s: %s", address, err)
return errors.New(errText)
}
if encryptionKey != nil {
err = signer.PublishEncrypted(address, retained, string(payload), encryptionKey)
} else {
err = signer.PublishSigned(address, retained, string(payload))
}
return err
}
// SetSignMessages enables or disables message signing. Intended for testing.
func (signer *MessageSigner) SetSignMessages(sign bool) {
signer.signMessages = sign
}
// Subscribe to messages on the given address
func (signer *MessageSigner) Subscribe(
address string,
handler func(address string, message string) error)
|
// Unsubscribe from messages on the given address
func (signer *MessageSigner) Unsubscribe(
address string,
handler func(address string, message string) error) {
signer.messenger.Unsubscribe(address, handler)
}
// PublishEncrypted signs and encrypts the payload and publishes the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishEncrypted(
address string, retained bool, payload string, publicKey *ecdsa.PublicKey) error {
var err error
message := payload
// first sign, then encrypt as per RFC
if signer.signMessages {
message, _ = CreateJWSSignature(string(payload), signer.privateKey)
}
emessage, err := EncryptMessage(message, publicKey)
if err != nil {
return err
}
err = signer.messenger.Publish(address, retained, emessage)
return err
}
// PublishSigned signs the payload and publishes the resulting message on the given address
// Signing only happens if the publisher's signingMethod is set to SigningMethodJWS
func (signer *MessageSigner) PublishSigned(
address string, retained bool, payload string) error {
var err error
// default is unsigned
message := payload
if signer.signMessages {
message, err = CreateJWSSignature(string(payload), signer.privateKey)
if err != nil {
logrus.Errorf("Publisher.publishMessage: Error signing message for address %s: %s", address, err)
}
}
err = signer.messenger.Publish(address, retained, message)
return err
}
// NewMessageSigner creates a new instance for signing and verifying published messages
// If getPublicKey is not provided, verification of signature is skipped
func NewMessageSigner(messenger IMessenger, signingKey *ecdsa.PrivateKey,
getPublicKey func(address string) *ecdsa.PublicKey,
) *MessageSigner {
signer := &MessageSigner{
GetPublicKey: getPublicKey,
messenger: messenger,
signMessages: true,
privateKey: signingKey, // private key for signing
}
return signer
}
/*
* Helper Functions for signing and verification
*/
// CreateEcdsaSignature creates an ECDSA256 signature from the payload using the provided private key
// This returns a base64url encoded signature
func CreateEcdsaSignature(payload []byte, privateKey *ecdsa.PrivateKey) string {
if privateKey == nil {
return ""
}
hashed := sha256.Sum256(payload)
r, s, err := ecdsa.Sign(rand.Reader, privateKey, hashed[:])
if err != nil {
return ""
}
sig, err := asn1.Marshal(ECDSASignature{r, s})
if err != nil {
return ""
}
return base64.URLEncoding.EncodeToString(sig)
}
// SignIdentity updates the base64URL encoded ECDSA256 signature of the public identity
func SignIdentity(publicIdent *types.PublisherIdentityMessage, privKey *ecdsa.PrivateKey) {
identCopy := *publicIdent
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
sigStr := CreateEcdsaSignature(payload, privKey)
publicIdent.IdentitySignature = sigStr
}
// CreateJWSSignature signs the payload using JWS ES256 and returns the JWS compact serialized message
func CreateJWSSignature(payload string, privateKey *ecdsa.PrivateKey) (string, error) {
joseSigner, err := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: privateKey}, nil)
if err != nil {
return "", err
}
signedObject, err := joseSigner.Sign([]byte(payload))
if err != nil {
return "", err
}
// serialized := signedObject.FullSerialize()
serialized, err := signedObject.CompactSerialize()
return serialized, err
}
// DecryptMessage deserializes and decrypts the message using JWE
// This returns the decrypted message, or the input message if the message was not encrypted
func DecryptMessage(serialized string, privateKey *ecdsa.PrivateKey) (message string, isEncrypted bool, err error) {
message = serialized
decrypter, err := jose.ParseEncrypted(serialized)
if err == nil {
dmessage, err := decrypter.Decrypt(privateKey)
message = string(dmessage)
return message, true, err
}
return message, false, err
}
// EncryptMessage encrypts and serializes the message using JWE
func EncryptMessage(message string, publicKey *ecdsa.PublicKey) (serialized string, err error) {
var jwe *jose.JSONWebEncryption
recpnt := jose.Recipient{Algorithm: jose.ECDH_ES, Key: publicKey}
encrypter, err := jose.NewEncrypter(jose.A128CBC_HS256, recpnt, nil)
if encrypter != nil {
jwe, err = encrypter.Encrypt([]byte(message))
}
if err != nil {
return message, err
}
serialized, err = jwe.CompactSerialize()
return serialized, err
}
// VerifyIdentitySignature verifies a base64URL encoded ECDSA256 signature in the identity
// against the identity itself using the sender's public key.
func VerifyIdentitySignature(ident *types.PublisherIdentityMessage, pubKey *ecdsa.PublicKey) error {
// the signing took place with the signature field empty
identCopy := *ident
identCopy.IdentitySignature = ""
payload, _ := json.Marshal(identCopy)
err := VerifyEcdsaSignature(payload, ident.IdentitySignature, pubKey)
// signingKey := jose.SigningKey{Algorithm: jose.ES256, Key: privKey}
// joseSigner, _ := jose.NewSigner(signingKey, nil)
// jwsObject, _ := joseSigner.Verify(payload)
// sig := jwsObject.Signatures[0].Signature
// sigStr := base64.URLEncoding.EncodeToString(sig)
// return sigStr
return err
}
// VerifyEcdsaSignature verifies the payload against the base64url encoded signature using the public key
// payload is any raw data
// signatureB64urlEncoded is the base64url encoded ECDSA 256 signature
// Intended for signing an object like the publisher identity. Use VerifyJWSMessage for
// verifying JWS signed messages.
func VerifyEcdsaSignature(payload []byte, signatureB64urlEncoded string, publicKey *ecdsa.PublicKey) error {
var rs ECDSASignature
if publicKey == nil {
return errors.New("VerifyEcdsaSignature: publicKey is nil")
}
signature, err := base64.URLEncoding.DecodeString(signatureB64urlEncoded)
if err != nil {
return errors.New("VerifyEcdsaSignature: Invalid signature")
}
if _, err = asn1.Unmarshal(signature, &rs); err != nil {
return errors.New("VerifyEcdsaSignature: Payload is not ASN")
}
hashed := sha256.Sum256(payload)
verified := ecdsa.Verify(publicKey, hashed[:], rs.R, rs.S)
if !verified {
return errors.New("VerifyEcdsaSignature: Signature does not match payload")
}
return nil
}
// VerifyJWSMessage verifies a signed message and returns its payload
// The message is a JWS encoded string. The public key of the sender is
// needed to verify the message.
//
|
{
signer.messenger.Subscribe(address, handler)
}
|
identifier_body
|
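The MessageSigner row above follows a sign-then-encrypt pattern (JWS compact signing with ES256, then JWE encryption with ECDH-ES and A128CBC-HS256), apparently built on the go-jose v2 package. The following is a minimal, self-contained Go sketch of that round trip under that assumption; the key names, the standalone main function, and the elided error handling are illustrative only and not part of the original code.

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	jose "gopkg.in/square/go-jose.v2"
)

func main() {
	// Sender key pair (for signing) and receiver key pair (for encryption).
	// Errors are elided for brevity in this sketch.
	senderKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	receiverKey, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)

	payload := `{"value": 42}`

	// Sign first (JWS, ES256), then encrypt (JWE, ECDH-ES + A128CBC-HS256),
	// mirroring the order used in PublishEncrypted.
	signer, _ := jose.NewSigner(jose.SigningKey{Algorithm: jose.ES256, Key: senderKey}, nil)
	jws, _ := signer.Sign([]byte(payload))
	signed, _ := jws.CompactSerialize()

	encrypter, _ := jose.NewEncrypter(jose.A128CBC_HS256,
		jose.Recipient{Algorithm: jose.ECDH_ES, Key: &receiverKey.PublicKey}, nil)
	jwe, _ := encrypter.Encrypt([]byte(signed))
	encrypted, _ := jwe.CompactSerialize()

	// Receiver side: decrypt with the private key, then verify the inner
	// signature with the sender's public key.
	parsedJWE, _ := jose.ParseEncrypted(encrypted)
	decrypted, _ := parsedJWE.Decrypt(receiverKey)

	parsedJWS, _ := jose.ParseSigned(string(decrypted))
	verified, err := parsedJWS.Verify(&senderKey.PublicKey)
	fmt.Println(string(verified), err)
}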
main.rs
|
`None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn run_jq5_on_file(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file '{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() {
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"##
|
let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith",
|
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() {
|
random_line_split
|
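The run_jq function above shells out to jq (or `fx jq`), pipes the JSON through the child process's stdin, and reads the result back from stdout. For comparison, here is a small Go sketch of the same stdin-piping approach using os/exec; it is an illustrative analogue, not part of the jq5 tool, and it assumes a `jq` binary is available on the PATH.

package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

// runJQ pipes jsonInput to a jq child process and returns its stdout,
// mirroring the Rust run_jq above: spawn jq with the filter as its only
// argument, write the JSON to stdin, then collect stdout and stderr.
func runJQ(jqPath, filter, jsonInput string) (string, error) {
	cmd := exec.Command(jqPath, filter)
	cmd.Stdin = strings.NewReader(jsonInput)

	var stdout, stderr bytes.Buffer
	cmd.Stdout = &stdout
	cmd.Stderr = &stderr

	if err := cmd.Run(); err != nil {
		return "", fmt.Errorf("jq failed: %v: %s", err, stderr.String())
	}
	return stdout.String(), nil
}

func main() {
	// "jq" on the PATH, matching the tool's default behaviour when no path is given.
	out, err := runJQ("jq", ".", `{"foo": 1, "bar": 2}`)
	fmt.Println(out, err)
}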
main.rs
|
`None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn run_jq5_on_file(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file '{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter()
|
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() {
let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "
|
{
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"##
);
}
|
identifier_body
|
main.rs
|
`None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success() {
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
} else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn
|
(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file '{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() {
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"##
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() {
let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith
|
run_jq5_on_file
|
identifier_name
|
main.rs
|
`None`, it assumes `jq` is in
/// the system path and attempts to invoke it using simply the command `jq`. Otherwise, it invokes
/// `jq` using the provided path.
async fn run_jq(
filter: &String,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let mut cmd_jq = match jq_path {
Some(path) => {
let command_str = path.as_path().to_str().unwrap();
if !Path::exists(Path::new(&command_str)) {
return Err(anyhow::anyhow!(
"Path provided in path-to-jq option did not specify a valid path to a binary."
));
}
Command::new(command_str)
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
None => {
let command_string = OsString::from("fx");
Command::new(&command_string)
.arg("jq")
.arg(&filter[..])
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?
}
};
let mut cmd_jq_stdin = cmd_jq.stdin.take().unwrap();
let bytestring = json_string.as_bytes();
let mut writer = BufWriter::new(&mut cmd_jq_stdin);
writer.write_all(bytestring)?;
//Close stdin
writer.flush()?;
drop(writer);
drop(cmd_jq_stdin);
let status = cmd_jq.wait()?;
let mut cmd_jq_stdout = String::new();
let mut cmd_jq_stderr = String::new();
let stdout = cmd_jq.stdout;
let stderr = cmd_jq.stderr;
if let Some(mut err) = stderr {
err.read_to_string(&mut cmd_jq_stderr)?;
Err(anyhow::anyhow!("jq produced the following error message:\n {}", cmd_jq_stderr))
} else if let Some(mut out) = stdout {
out.read_to_string(&mut cmd_jq_stdout)?;
Ok(cmd_jq_stdout)
} else if !status.success()
|
else {
Err(anyhow::anyhow!("jq returned exit code 0 but no output or error message"))
}
}
/// Calls jq on the provided json and then fills back comments at correct places.
async fn run_jq5(
filter: &String,
parsed_json5: ParsedDocument,
json_string: String,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let jq_out = run_jq(&filter, json_string, jq_path).await?;
let mut parsed_json = ParsedDocument::from_string(jq_out, None)?;
traverser::fill_comments(&parsed_json5.content, &mut parsed_json.content)?;
let format = Json5Format::new()?;
Ok(format.to_string(&parsed_json)?)
}
/// Calls `run_jq5` on the contents of a file and returns the return value of `run_jq5`.
async fn run_jq5_on_file(
filter: &String,
file: &PathBuf,
jq_path: &Option<PathBuf>,
) -> Result<String, anyhow::Error> {
let (parsed_json5, json_string) = reader::read_json5_fromfile(&file)?;
run_jq5(&filter, parsed_json5, json_string, jq_path).await
}
async fn run(
filter: String,
files: Vec<PathBuf>,
jq_path: &Option<PathBuf>,
) -> Result<Vec<String>, anyhow::Error> {
let mut jq5_output_futures = Vec::with_capacity(files.len());
for file in files.iter() {
jq5_output_futures.push(run_jq5_on_file(&filter, file, &jq_path));
}
let jq5_outputs = join_all(jq5_output_futures).await;
let mut trusted_outs = Vec::with_capacity(jq5_outputs.len());
for (i, jq5_output) in jq5_outputs.into_iter().enumerate() {
match jq5_output {
Err(err) => {
return Err(anyhow::anyhow!(
r"jq5 encountered an error processing at least one of the provided json5 objects.
The first error occurred while processing file '{}':
{}",
files[i].as_path().to_str().unwrap(),
err
));
}
Ok(output) => {
trusted_outs.push(output);
}
}
}
Ok(trusted_outs)
}
#[fuchsia_async::run_singlethreaded]
async fn main() -> Result<(), anyhow::Error> {
eprintln!("{}", "This tool is a work in progress: use with caution.\n");
let args = Opt::from_args();
if args.files.len() == 0 {
let (parsed_json5, json_string) = reader::read_json5_from_input(&mut io::stdin())?;
let out = run_jq5(&args.filter, parsed_json5, json_string, &args.jq_path).await?;
io::stdout().write_all(out.as_bytes())?;
} else {
let outs = run(args.filter, args.files, &args.jq_path).await?;
for out in outs {
io::stdout().write_all(out.as_bytes())?;
}
}
Ok(())
}
#[derive(Debug, StructOpt)]
#[structopt(
name = "jq5",
about = "An extension of jq to work on json5 objects. \nThis tool is a work in progress: use with caution."
)]
struct Opt {
// TODO(72435) Add relevant options from jq
filter: String,
#[structopt(parse(from_os_str))]
files: Vec<PathBuf>,
#[structopt(long = "--path-to-jq", parse(from_os_str))]
jq_path: Option<PathBuf>,
}
#[cfg(test)]
mod tests {
use super::*;
use std::env;
use std::fs::OpenOptions;
const JQ_PATH_STR: &str = env!("JQ_PATH");
// Tests that run_jq successfully invokes jq using the identity filter and
// an empty JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_1() {
let filter = String::from(".");
let input = String::from("{}");
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(run_jq(&filter, input, &jq_path).await.unwrap(), "{}\n");
}
// Tests that run_jq successfully invokes jq using the identity filter and a
// simple JSON object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_id_filter_2() {
let filter = String::from(".");
let input = String::from(r#"{"foo": 1, "bar": 2}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo": 1,
"bar": 2
}
"##
);
}
// Tests a simple filter and simple object.
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq_deconstruct_filter() {
let filter = String::from("{foo2: .foo1, bar2: .bar1}");
let input = String::from(r#"{"foo1": 0, "bar1": 42}"#);
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq(&filter, input, &jq_path).await.unwrap(),
r##"{
"foo2": 0,
"bar2": 42
}
"##
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_deconstruct_filter() {
let filter = String::from("{foo: .foo, baz: .bar}");
let json5_string = String::from(
r##"{
//Foo
foo: 0,
//Bar
bar: 42
}"##,
);
let format = Json5Format::new().unwrap();
let (parsed_json5, json_string) = reader::read_json5(json5_string).unwrap();
let jq_path = Some(PathBuf::from(JQ_PATH_STR));
assert_eq!(
run_jq5(&filter, parsed_json5, json_string, &jq_path).await.unwrap(),
format
.to_string(
&ParsedDocument::from_str(
r##"{
//Foo
foo: 0,
baz: 42
}"##,
None
)
.unwrap()
)
.unwrap()
);
}
#[fuchsia_async::run_singlethreaded(test)]
async fn run_jq5_on_file_w_id_filter() {
let tmp_path = PathBuf::from(r"/tmp/read_from_file_2.json5");
let mut file = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(tmp_path.as_path())
.unwrap();
let json5_string = String::from(
r##"{
"name": {
"last": "Smith
|
{
Err(anyhow::anyhow!("jq returned with non-zero exit code but no error message"))
}
|
conditional_block
|
DAQClient.py
|
getAttributes(self, bean, fldList):
attrs = self.__client.mbean.getAttributes(bean, fldList)
if type(attrs) == dict and len(attrs) > 0:
for k in attrs.keys():
attrs[k] = self.__unFixValue(attrs[k])
return attrs
def getBeanNames(self):
return self.__beanList
def getBeanFields(self, bean):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
return self.__beanFields[bean]
class ComponentName(object):
"DAQ component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = ''
else:
csStr += ']'
return csStr
def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also reading requests from the eventBuilder
if self.isHub():
return True
for conn in self.__connectors:
if conn.isInput():
return False
return True
def listConnectorStates(self):
return self.__client.xmlrpc.listConnectorStates()
def logTo(self, logIP, logPort, liveIP, livePort):
"Send log messages to the specified host and port"
self.__log.openLog(logIP, logPort, liveIP, livePort)
if logIP is None:
logIP = ''
if logPort is None:
logPort = 0
if liveIP is None:
liveIP = ''
if livePort is None:
livePort = 0
self.__client.xmlrpc.logTo(logIP, logPort, liveIP, livePort)
infoStr = self.__client.xmlrpc.getVersionInfo()
self.__log.debug(("Version info: %(filename)s %(revision)s" +
" %(date)s %(time)s %(author)s %(release)s" +
" %(repo_rev)s") % get_version_info(infoStr))
def map(self):
|
return { "id" : self.__id,
"compName" : self.name(),
"compNum" : self.num(),
"host" : self.__host,
"rpcPort" : self.__port,
"mbeanPort" : self.__mbeanPort }
|
identifier_body
|
|
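The __unFixValue helper in the DAQClient.py rows undoes the XML-RPC server's string-encoding of large numeric values: it walks dicts and lists recursively and converts strings that are really integers back to ints, leaving everything else untouched. A rough Go sketch of the same idea over a decoded JSON-like structure follows; the function name and sample data are illustrative assumptions, not part of the original module.

package main

import (
	"fmt"
	"strconv"
)

// unFixValue walks a decoded JSON-like value and converts strings that are
// really integers back to ints, recursing into maps and slices; all other
// values are returned unchanged.
func unFixValue(v interface{}) interface{} {
	switch t := v.(type) {
	case map[string]interface{}:
		for k, elem := range t {
			t[k] = unFixValue(elem)
		}
		return t
	case []interface{}:
		for i, elem := range t {
			t[i] = unFixValue(elem)
		}
		return t
	case string:
		if n, err := strconv.Atoi(t); err == nil {
			return n
		}
		return t
	default:
		return v
	}
}

func main() {
	attrs := map[string]interface{}{
		"events": "12345678901",           // number masquerading as a string
		"rates":  []interface{}{"1", "2"}, // nested list of stringified numbers
		"state":  "running",               // ordinary string, left alone
	}
	fmt.Println(unFixValue(attrs))
}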
DAQClient.py
|
. If obj is a dict or list, recurse into it
converting all such masquerading strings. All other types are
unaltered. This pairs with the similarly named fix* methods in
icecube.daq.juggler.mbean.XMLRPCServer """
if type(obj) is dict:
for k in obj.keys():
obj[k] = cls.__unFixValue(obj[k])
elif type(obj) is list:
for i in xrange(0, len(obj)):
obj[i] = cls.__unFixValue(obj[i])
elif type(obj) is str:
try:
return int(obj)
except ValueError:
pass
return obj
def checkBeanField(self, bean, fld):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
if fld not in self.__beanFields[bean]:
msg = "Bean %s field %s not in list of bean fields for %s (%s)" % \
(bean, fld, self.__compName, str(self.__beanFields[bean]))
raise BeanFieldNotFoundException(msg)
def get(self, bean, fld):
self.checkBeanField(bean, fld)
return self.__unFixValue(self.__client.mbean.get(bean, fld))
def getAttributes(self, bean, fldList):
attrs = self.__client.mbean.getAttributes(bean, fldList)
if type(attrs) == dict and len(attrs) > 0:
for k in attrs.keys():
|
return attrs
def getBeanNames(self):
return self.__beanList
def getBeanFields(self, bean):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
return self.__beanFields[bean]
class ComponentName(object):
"DAQ component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = ''
else:
csStr += ']'
return csStr
def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also
|
attrs[k] = self.__unFixValue(attrs[k])
|
conditional_block
|
DAQClient.py
|
ion. If obj is a dict or list, recurse into it
converting all such masquerading strings. All other types are
unaltered. This pairs with the similarly named fix* methods in
icecube.daq.juggler.mbean.XMLRPCServer """
if type(obj) is dict:
for k in obj.keys():
obj[k] = cls.__unFixValue(obj[k])
elif type(obj) is list:
for i in xrange(0, len(obj)):
obj[i] = cls.__unFixValue(obj[i])
elif type(obj) is str:
try:
return int(obj)
except ValueError:
pass
return obj
def checkBeanField(self, bean, fld):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
if fld not in self.__beanFields[bean]:
msg = "Bean %s field %s not in list of bean fields for %s (%s)" % \
(bean, fld, self.__compName, str(self.__beanFields[bean]))
raise BeanFieldNotFoundException(msg)
def get(self, bean, fld):
self.checkBeanField(bean, fld)
return self.__unFixValue(self.__client.mbean.get(bean, fld))
def getAttributes(self, bean, fldList):
attrs = self.__client.mbean.getAttributes(bean, fldList)
if type(attrs) == dict and len(attrs) > 0:
for k in attrs.keys():
attrs[k] = self.__unFixValue(attrs[k])
return attrs
def getBeanNames(self):
return self.__beanList
def getBeanFields(self, bean):
if bean not in self.__beanList:
msg = "Bean %s not in list of beans for %s" % \
(bean, self.__compName)
raise BeanFieldNotFoundException(msg)
return self.__beanFields[bean]
class ComponentName(object):
"DAQ component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = ''
|
def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also
|
else:
csStr += ']'
return csStr
|
random_line_split
|
DAQClient.py
|
component name"
def __init__(self, name, num):
self.__name = name
self.__num = num
def __repr__(self):
return self.fullName()
def fileName(self):
return '%s-%d' % (self.__name, self.__num)
def fullName(self):
if self.__num == 0 and self.__name[-3:].lower() != 'hub':
return self.__name
return '%s#%d' % (self.__name, self.__num)
def isBuilder(self):
"Is this an eventBuilder (or debugging fooBuilder)?"
return self.__name.endswith("Builder")
def isComponent(self, name, num=-1):
"Does this component have the specified name and number?"
return self.__name == name and (num < 0 or self.__num == num)
def isHub(self):
return self.__name.endswith("Hub")
def name(self):
return self.__name
def num(self):
return self.__num
class DAQClientException(Exception): pass
class DAQClient(ComponentName):
"""DAQ component
id - internal client ID
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component's MBean server port number
connectors - list of Connectors
client - XML-RPC client
deadCount - number of sequential failed pings
cmdOrder - order in which start/stop commands are issued
"""
# next component ID
#
ID = UniqueID()
# internal state indicating that the client hasn't answered
# some number of pings but has not been declared dead
#
STATE_MISSING = 'MIA'
# internal state indicating that the client is
# no longer responding to pings
#
STATE_DEAD = RunSet.STATE_DEAD
def __init__(self, name, num, host, port, mbeanPort, connectors,
quiet=False):
"""
DAQClient constructor
name - component name
num - component instance number
host - component host name
port - component port number
mbeanPort - component MBean port number
connectors - list of Connectors
"""
super(DAQClient, self).__init__(name, num)
self.__id = DAQClient.ID.next()
self.__host = host
self.__port = port
self.__mbeanPort = mbeanPort
self.__connectors = connectors
self.__deadCount = 0
self.__cmdOrder = None
self.__log = self.createLogger(quiet=quiet)
self.__client = self.createClient(host, port)
try:
self.__mbean = self.createMBeanClient(host, mbeanPort)
except:
self.__mbean = None
def __str__(self):
"String description"
if self.__port <= 0:
hpStr = ''
else:
hpStr = ' at %s:%d' % (self.__host, self.__port)
if self.__mbeanPort <= 0:
mbeanStr = ''
else:
mbeanStr = ' M#%d' % self.__mbeanPort
extraStr = ''
if self.__connectors and len(self.__connectors) > 0:
first = True
for c in self.__connectors:
if first:
extraStr += ' [' + str(c)
first = False
else:
extraStr += ' ' + str(c)
extraStr += ']'
return "ID#%d %s%s%s%s" % \
(self.__id, self.fullName(), hpStr, mbeanStr, extraStr)
def checkBeanField(self, bean, field):
if self.__mbean is not None:
self.__mbean.checkBeanField(bean, field)
def close(self):
self.__log.close()
def commitSubrun(self, subrunNum, latestTime):
"Start marking events with the subrun number"
try:
return self.__client.xmlrpc.commitSubrun(subrunNum, latestTime)
except:
self.__log.error(exc_string())
return None
def configure(self, configName=None):
"Configure this component"
try:
if not configName:
return self.__client.xmlrpc.configure()
else:
return self.__client.xmlrpc.configure(configName)
except:
self.__log.error(exc_string())
return None
def connect(self, connList=None):
"Connect this component with other components in a runset"
if not connList:
return self.__client.xmlrpc.connect()
cl = []
for conn in connList:
cl.append(conn.map())
return self.__client.xmlrpc.connect(cl)
def connectors(self):
return self.__connectors[:]
def createClient(self, host, port):
return RPCClient(host, port)
def createLogger(self, quiet):
return CnCLogger(quiet=quiet)
def createMBeanClient(self, host, mbeanPort):
return MBeanClient(self.fullName(), host, mbeanPort)
def events(self, subrunNumber):
"Get the number of events in the specified subrun"
try:
evts = self.__client.xmlrpc.getEvents(subrunNumber)
if type(evts) == str:
evts = long(evts[:-1])
return evts
except:
self.__log.error(exc_string())
return None
def forcedStop(self):
"Force component to stop running"
try:
return self.__client.xmlrpc.forcedStop()
except:
self.__log.error(exc_string())
return None
def getBeanFields(self, bean):
if self.__mbean is None:
return []
return self.__mbean.getBeanFields(bean)
def getBeanNames(self):
if self.__mbean is None:
return []
return self.__mbean.getBeanNames()
def getMultiBeanFields(self, name, fieldList):
if self.__mbean is None:
return {}
return self.__mbean.getAttributes(name, fieldList)
def getNonstoppedConnectorsString(self):
"""
Return string describing states of all connectors
which have not yet stopped
"""
try:
connStates = self.__client.xmlrpc.listConnectorStates()
except:
self.__log.error(exc_string())
connStates = []
csStr = None
for cs in connStates:
if cs["state"] == 'idle':
continue
if csStr is None:
csStr = '['
else:
csStr += ', '
csStr += '%s:%s' % (cs["type"], cs["state"])
if csStr is None:
csStr = ''
else:
csStr += ']'
return csStr
def getSingleBeanField(self, name, field):
if self.__mbean is None:
return None
return self.__mbean.get(name, field)
def host(self):
return self.__host
def id(self):
return self.__id
def isSource(self):
"Is this component a source of data?"
# XXX Hack for stringHubs which are sources but which confuse
# things by also reading requests from the eventBuilder
if self.isHub():
return True
for conn in self.__connectors:
if conn.isInput():
return False
return True
def listConnectorStates(self):
return self.__client.xmlrpc.listConnectorStates()
def logTo(self, logIP, logPort, liveIP, livePort):
"Send log messages to the specified host and port"
self.__log.openLog(logIP, logPort, liveIP, livePort)
if logIP is None:
logIP = ''
if logPort is None:
logPort = 0
if liveIP is None:
liveIP = ''
if livePort is None:
livePort = 0
self.__client.xmlrpc.logTo(logIP, logPort, liveIP, livePort)
infoStr = self.__client.xmlrpc.getVersionInfo()
self.__log.debug(("Version info: %(filename)s %(revision)s" +
" %(date)s %(time)s %(author)s %(release)s" +
" %(repo_rev)s") % get_version_info(infoStr))
    def map(self):
        "Return this component's id/name/num/host/port values as a dict"
return { "id" : self.__id,
"compName" : self.name(),
"compNum" : self.num(),
"host" : self.__host,
"rpcPort" : self.__port,
"mbeanPort" : self.__mbeanPort }
def mbeanPort(self):
return self.__mbeanPort
def monitor(self):
"Return the monitoring value"
return self.state()
def order(self):
return self.__cmdOrder
def port(self):
return self.__port
def prepareSubrun(self, subrunNum):
"Start marking events as bogus in preparation for subrun"
try:
return self.__client.xmlrpc.prepareSubrun(subrunNum)
except:
self.__log.error(exc_string())
return None
def reset(self):
"Reset component back to the idle state"
self.__log.closeLog()
return self.__client.xmlrpc.reset()
    def resetLogging(self):
lib.rs
/// Connect to one or more NATS servers at the given URLs.
///
/// The [`IntoServerList`] trait allows to pass URLs in various different formats. Furthermore, if
/// you need more control of the connection's parameters use [`Options::connect()`].
///
/// **Warning:** Asynchronous errors can occur while the NATS client is operating.
/// To handle them, register a handler with [`Options::error_callback()`].
///
/// # Examples
///
/// If no scheme is provided the `nats://` scheme is assumed. The default port is `4222`.
/// ```no_run
/// let nc = nats::connect("demo.nats.io")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// It is possible to provide several URLs as a comma separated list.
/// ```no_run
/// let nc = nats::connect("demo.nats.io,tls://demo.nats.io:4443")?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Alternatively, an array of strings can be passed.
/// ```no_run
/// # use nats::IntoServerList;
/// let nc = nats::connect(&["demo.nats.io", "tls://demo.nats.io:4443"])?;
/// # Ok::<(), std::io::Error>(())
/// ```
///
/// Instead of using strings, [`ServerAddress`]es can be used directly as well. This is handy for
/// validating user input.
/// ```no_run
/// use nats::ServerAddress;
/// use std::io;
/// use structopt::StructOpt;
///
/// #[derive(Debug, StructOpt)]
/// struct Config {
/// #[structopt(short, long = "server", default_value = "demo.nats.io")]
/// servers: Vec<ServerAddress>,
/// }
///
/// fn main() -> io::Result<()> {
/// let config = Config::from_args();
/// let nc = nats::connect(config.servers)?;
/// Ok(())
/// }
/// ```
pub fn connect<I: IntoServerList>(nats_urls: I) -> io::Result<Connection> {
Options::new().connect(nats_urls)
}
impl Connection {
/// Connects on one or more NATS servers with the given options.
///
/// For more on how to use [`IntoServerList`] trait see [`crate::connect()`].
pub(crate) fn connect_with_options<I>(urls: I, options: Options) -> io::Result<Connection>
where
I: IntoServerList,
{
let urls = urls.into_server_list()?;
let client = Client::connect(urls, options)?;
client.flush(DEFAULT_FLUSH_TIMEOUT)?;
Ok(Connection(Arc::new(Inner { client })))
}
/// Create a subscription for the given NATS connection.
///
/// # Example
/// ```
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo")?;
/// # Ok(())
/// # }
/// ```
pub fn subscribe(&self, subject: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, None)
}
/// Create a queue subscription for the given NATS connection.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.queue_subscribe("foo", "production")?;
/// # Ok(())
/// # }
/// ```
pub fn queue_subscribe(&self, subject: &str, queue: &str) -> io::Result<Subscription> {
self.do_subscribe(subject, Some(queue))
}
/// Publish a message on the given subject.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.publish("foo", "Hello World!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<()> {
self.publish_with_reply_or_headers(subject, None, None, msg)
}
/// Publish a message on the given subject with a reply subject for
/// responses.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// nc.publish_request("foo", &reply, "Help me!")?;
/// # Ok(())
/// # }
/// ```
pub fn publish_request(
&self,
subject: &str,
reply: &str,
msg: impl AsRef<[u8]>,
) -> io::Result<()> {
self.0
.client
.publish(subject, Some(reply), None, msg.as_ref())
}
/// Create a new globally unique inbox which can be used for replies.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let reply = nc.new_inbox();
/// let rsub = nc.subscribe(&reply)?;
/// # Ok(())
/// # }
/// ```
pub fn new_inbox(&self) -> String {
format!("_INBOX.{}", nuid::next())
}
/// Publish a message on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request("foo", "Help me?")?;
/// # Ok(())
/// # }
/// ```
pub fn request(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, None, msg)
}
/// Publish a message on the given subject as a request and receive the
/// response. This call will return after the timeout duration if no
/// response is received.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let resp = nc.request_timeout("foo", "Help me?", std::time::Duration::from_secs(2))?;
/// # Ok(())
/// # }
/// ```
pub fn request_timeout(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
timeout: Duration,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, None, Some(timeout), msg)
}
/// Publish a message with headers on the given subject as a request and receive the
/// response.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
    /// let resp = nc.request_with_headers("foo", "Help me?", &headers)?;
/// # Ok(())
/// # }
/// ```
pub fn request_with_headers(
&self,
subject: &str,
msg: impl AsRef<[u8]>,
headers: &HeaderMap,
) -> io::Result<Message> {
self.request_with_headers_or_timeout(subject, Some(headers), None, msg)
}
/// Publish a message on the given subject as a request and receive the
    /// response. If a timeout duration is given, this call will return after that duration when no
/// response is received. It also allows passing headers.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// let mut headers = nats::HeaderMap::new();
/// headers.insert("X-key", "value".to_string());
/// let resp = nc.request_with_headers_or_timeout(
/// "foo",
/// Some(&headers),
/// Some(std::time::Duration::from_secs(2)),
/// "Help me?",
/// )?;
/// # Ok(())
/// # }
/// ```
    pub fn request_with_headers_or_timeout
lib.rs
        missing_docs,
        nonstandard_style,
        rust_2018_idioms,
        trivial_casts,
        trivial_numeric_casts,
unsafe_code,
unused,
unused_qualifications
)
)]
#![cfg_attr(feature = "fault_injection", deny(
// over time, consider enabling the following commented-out lints:
// clippy::else_if_without_else,
// clippy::indexing_slicing,
// clippy::multiple_crate_versions,
// clippy::missing_const_for_fn,
clippy::cast_lossless,
clippy::cast_possible_truncation,
clippy::cast_possible_wrap,
clippy::cast_precision_loss,
clippy::cast_sign_loss,
clippy::checked_conversions,
clippy::decimal_literal_representation,
clippy::doc_markdown,
clippy::empty_enum,
clippy::explicit_into_iter_loop,
clippy::explicit_iter_loop,
clippy::expl_impl_clone_on_copy,
clippy::fallible_impl_from,
clippy::filter_map_next,
clippy::float_arithmetic,
clippy::get_unwrap,
clippy::if_not_else,
clippy::inline_always,
clippy::invalid_upcast_comparisons,
clippy::items_after_statements,
clippy::manual_filter_map,
clippy::manual_find_map,
clippy::map_flatten,
clippy::map_unwrap_or,
clippy::match_same_arms,
clippy::maybe_infinite_iter,
clippy::mem_forget,
clippy::needless_borrow,
clippy::needless_continue,
clippy::needless_pass_by_value,
clippy::non_ascii_literal,
clippy::path_buf_push_overwrite,
clippy::print_stdout,
clippy::single_match_else,
clippy::string_add,
clippy::string_add_assign,
clippy::type_repetition_in_bounds,
clippy::unicode_not_nfc,
clippy::unimplemented,
clippy::unseparated_literal_suffix,
clippy::wildcard_dependencies,
clippy::wildcard_enum_match_arm,
))]
#![allow(
clippy::match_like_matches_macro,
clippy::await_holding_lock,
clippy::shadow_reuse,
clippy::shadow_same,
clippy::shadow_unrelated,
clippy::wildcard_enum_match_arm,
clippy::module_name_repetitions
)]
// As this is a deprecated client, we don't want warnings from new lints to make CI red.
#![allow(clippy::all)]
#![allow(warnings)]
/// Async-enabled NATS client.
pub mod asynk;
mod auth_utils;
mod client;
mod connect;
mod connector;
mod message;
mod options;
mod proto;
mod secure_wipe;
mod subscription;
/// Header constants and types.
pub mod header;
/// `JetStream` stream management and consumers.
pub mod jetstream;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod kv;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable")))]
pub mod object_store;
#[cfg(feature = "fault_injection")]
mod fault_injection;
#[cfg(feature = "fault_injection")]
use fault_injection::{inject_delay, inject_io_failure};
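// When the fault_injection feature is disabled, these helpers compile to no-ops.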
#[cfg(not(feature = "fault_injection"))]
fn inject_delay() {}
#[cfg(not(feature = "fault_injection"))]
fn inject_io_failure() -> io::Result<()> {
Ok(())
}
// comment out until we reach MSRV 1.54.0
// #[doc = include_str!("../docs/migration-guide-0.17.0.md")]
// #[derive(Copy, Clone)]
// pub struct Migration0170;
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "this has been renamed to `Options`.")]
pub type ConnectionOptions = Options;
#[doc(hidden)]
#[deprecated(since = "0.17.0", note = "this has been moved to `header::HeaderMap`.")]
pub type Headers = HeaderMap;
pub use header::HeaderMap;
use std::{
io::{self, Error, ErrorKind},
sync::Arc,
time::{Duration, Instant},
};
use lazy_static::lazy_static;
use regex::Regex;
pub use connector::{IntoServerList, ServerAddress};
pub use jetstream::JetStreamOptions;
pub use message::Message;
pub use options::Options;
pub use subscription::{Handler, Subscription};
/// A re-export of the `rustls` crate used in this crate,
/// for use in cases where manual client configurations
/// must be provided using `Options::tls_client_config`.
pub use rustls;
#[doc(hidden)]
pub use connect::ConnectInfo;
use client::Client;
use options::AuthStyle;
use secure_wipe::{SecureString, SecureVec};
const VERSION: &str = env!("CARGO_PKG_VERSION");
const LANG: &str = "rust";
const DEFAULT_FLUSH_TIMEOUT: Duration = Duration::from_secs(10);
lazy_static! {
static ref VERSION_RE: Regex = Regex::new(r#"\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?"#).unwrap();
}
/// Information sent by the server back to this client
/// during initial connection, and possibly again later.
#[allow(unused)]
#[derive(Debug, Default, Clone)]
pub struct ServerInfo {
/// The unique identifier of the NATS server.
pub server_id: String,
/// Generated Server Name.
pub server_name: String,
/// The host specified in the cluster parameter/options.
pub host: String,
/// The port number specified in the cluster parameter/options.
pub port: u16,
/// The version of the NATS server.
pub version: String,
/// If this is set, then the server should try to authenticate upon
/// connect.
pub auth_required: bool,
/// If this is set, then the server must authenticate using TLS.
pub tls_required: bool,
/// Maximum payload size that the server will accept.
pub max_payload: usize,
/// The protocol version in use.
pub proto: i8,
/// The server-assigned client ID. This may change during reconnection.
pub client_id: u64,
/// The version of golang the NATS server was built with.
pub go: String,
/// The nonce used for nkeys.
pub nonce: String,
/// A list of server urls that a client can connect to.
pub connect_urls: Vec<String>,
/// The client IP as known by the server.
pub client_ip: String,
/// Whether the server supports headers.
pub headers: bool,
/// Whether server goes into lame duck mode.
pub lame_duck_mode: bool,
}
impl ServerInfo {
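    /// Parses the `INFO` JSON payload sent by the server, returning `None`
    /// when a required field is missing or malformed.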
fn parse(s: &str) -> Option<ServerInfo> {
let mut obj = json::parse(s).ok()?;
Some(ServerInfo {
server_id: obj["server_id"].take_string()?,
server_name: obj["server_name"].take_string().unwrap_or_default(),
host: obj["host"].take_string()?,
port: obj["port"].as_u16()?,
version: obj["version"].take_string()?,
auth_required: obj["auth_required"].as_bool().unwrap_or(false),
tls_required: obj["tls_required"].as_bool().unwrap_or(false),
max_payload: obj["max_payload"].as_usize()?,
proto: obj["proto"].as_i8()?,
client_id: obj["client_id"].as_u64()?,
go: obj["go"].take_string()?,
nonce: obj["nonce"].take_string().unwrap_or_default(),
connect_urls: obj["connect_urls"]
.members_mut()
.filter_map(|m| m.take_string())
.collect(),
client_ip: obj["client_ip"].take_string().unwrap_or_default(),
headers: obj["headers"].as_bool().unwrap_or(false),
lame_duck_mode: obj["ldm"].as_bool().unwrap_or(false),
})
}
}
/// A NATS connection.
#[derive(Clone, Debug)]
pub struct Connection(pub(crate) Arc<Inner>);
#[derive(Clone, Debug)]
struct Inner {
client: Client,
}
impl Drop for Inner {
fn drop(&mut self) {
self.client.shutdown();
}
}
lib.rs
/// # nc.subscribe("foo")?.with_handler(move |m| { m.respond("ans=42")?; Ok(()) });
/// for msg in nc.request_multi("foo", "Help")?.iter().take(1) {}
/// # Ok(())
/// # }
/// ```
pub fn request_multi(&self, subject: &str, msg: impl AsRef<[u8]>) -> io::Result<Subscription> {
// Publish a request.
let reply = self.new_inbox();
let sub = self.subscribe(&reply)?;
self.publish_with_reply_or_headers(subject, Some(reply.as_str()), None, msg)?;
// Return the subscription.
Ok(sub)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server does not
    /// respond within 10 seconds. Will fail with `NotConnected` if the
/// server is not currently connected. Will fail with `BrokenPipe` if
/// the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush()?;
/// # Ok(())
/// # }
/// ```
pub fn flush(&self) -> io::Result<()> {
self.flush_timeout(DEFAULT_FLUSH_TIMEOUT)
}
/// Flush a NATS connection by sending a `PING` protocol and waiting for the
/// responding `PONG`. Will fail with `TimedOut` if the server takes
/// longer than this duration to respond. Will fail with `NotConnected`
/// if the server is not currently connected. Will fail with
/// `BrokenPipe` if the connection to the server is lost.
///
/// # Example
/// ```no_run
/// # use std::time::Duration;
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.flush_timeout(Duration::from_secs(5))?;
/// # Ok(())
/// # }
/// ```
pub fn flush_timeout(&self, duration: Duration) -> io::Result<()> {
self.0.client.flush(duration)
}
/// Close a NATS connection. All clones of
/// this `Connection` will also be closed,
/// as the backing IO threads are shared.
///
/// If the client is currently connected
/// to a server, the outbound write buffer
/// will be flushed in the process of
/// shutting down.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// nc.close();
/// # Ok(())
/// # }
/// ```
pub fn close(self) {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT).ok();
self.0.client.close();
}
/// Calculates the round trip time between this client and the server,
/// if the server is currently connected. Fails with `TimedOut` if
/// the server takes more than 10 seconds to respond.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("server rtt: {:?}", nc.rtt());
/// # Ok(())
/// # }
/// ```
pub fn rtt(&self) -> io::Result<Duration> {
let start = Instant::now();
self.flush()?;
Ok(start.elapsed())
}
/// Returns true if the version is compatible with the version components.
pub fn is_server_compatible_version(&self, major: i64, minor: i64, patch: i64) -> bool {
let server_info = self.0.client.server_info();
let server_version_captures = VERSION_RE.captures(&server_info.version).unwrap();
let server_major = server_version_captures
.get(1)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_minor = server_version_captures
.get(2)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
let server_patch = server_version_captures
.get(3)
.map(|m| m.as_str().parse::<i64>().unwrap())
.unwrap();
if server_major < major
|| (server_major == major && server_minor < minor)
|| (server_major == major && server_minor == minor && server_patch < patch)
{
return false;
}
true
}
/// Returns the client IP as known by the server.
/// Supported as of server version 2.1.6.
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// println!("ip: {:?}", nc.client_ip());
/// # Ok(())
/// # }
/// ```
pub fn client_ip(&self) -> io::Result<std::net::IpAddr> {
let info = self.0.client.server_info();
match info.client_ip.as_str() {
"" => Err(Error::new(
ErrorKind::Other,
&*format!(
"client_ip was not provided by the server. It is \
supported on servers above version 2.1.6. The server \
version is {}",
info.version
),
)),
ip => match ip.parse() {
Ok(addr) => Ok(addr),
Err(_) => Err(Error::new(
ErrorKind::InvalidData,
&*format!(
"client_ip provided by the server cannot be parsed. \
The server provided IP: {}",
info.client_ip
),
)),
},
}
}
/// Returns the client ID as known by the most recently connected server.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
    /// println!("id: {:?}", nc.client_id());
/// # Ok(())
/// # }
/// ```
pub fn client_id(&self) -> u64 {
self.0.client.server_info().client_id
}
/// Send an unsubscription for all subs then flush the connection, allowing
/// any unprocessed messages to be handled by a handler function if one
/// is configured.
///
/// After the flush returns, we know that a round-trip to the server has
/// happened after it received our unsubscription, so we shut down the
/// subscriber afterwards.
///
/// A similar method exists for the `Subscription` struct which will drain
/// a single `Subscription` without shutting down the entire connection
/// afterward.
///
/// # Example
/// ```no_run
/// # use std::sync::{Arc, atomic::{AtomicBool, Ordering::SeqCst}};
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let received = Arc::new(AtomicBool::new(false));
/// let received_2 = received.clone();
///
/// nc.subscribe("test.drain")?.with_handler(move |m| {
/// received_2.store(true, SeqCst);
/// Ok(())
/// });
///
/// nc.publish("test.drain", "message")?;
/// nc.drain()?;
///
/// # std::thread::sleep(std::time::Duration::from_secs(1));
///
/// assert!(received.load(SeqCst));
///
/// # Ok(())
/// # }
/// ```
pub fn drain(&self) -> io::Result<()> {
self.0.client.flush(DEFAULT_FLUSH_TIMEOUT)?;
self.0.client.close();
Ok(())
}
/// Publish a message which may have a reply subject or headers set.
///
/// # Example
/// ```no_run
/// # fn main() -> std::io::Result<()> {
/// # let nc = nats::connect("demo.nats.io")?;
/// let sub = nc.subscribe("foo.headers")?;
/// let headers = [("header1", "value1"), ("header2", "value2")]
/// .iter()
/// .collect();
/// let reply_to = None;
/// nc.publish_with_reply_or_headers("foo.headers", reply_to, Some(&headers), "Hello World!")?;
/// nc.flush()?;
/// let message = sub.next_timeout(std::time::Duration::from_secs(2)).unwrap();
/// assert_eq!(message.headers.unwrap().len(), 2);
/// # Ok(())
/// # }
/// ```
pub fn publish_with_reply_or_headers(
&self,
subject: &str,
reply: Option<&str>,
headers: Option<&HeaderMap>,
msg: impl AsRef<[u8]>,
) -> io::Result<()>
{
self.0.client.publish(subject, reply, headers, msg.as_ref())
}
prebuilt.go
func (p *prebuiltLibraryLinker) linkerDeps(ctx DepsContext, deps Deps) Deps {
return p.libraryDecorator.linkerDeps(ctx, deps)
}
func (p *prebuiltLibraryLinker) linkerFlags(ctx ModuleContext, flags Flags) Flags {
return flags
}
func (p *prebuiltLibraryLinker) linkerProps() []interface{} {
return p.libraryDecorator.linkerProps()
}
func (p *prebuiltLibraryLinker) link(ctx ModuleContext,
flags Flags, deps PathDeps, objs Objects) android.Path {
p.libraryDecorator.flagExporter.exportIncludes(ctx)
p.libraryDecorator.flagExporter.reexportDirs(deps.ReexportedDirs...)
p.libraryDecorator.flagExporter.reexportSystemDirs(deps.ReexportedSystemDirs...)
p.libraryDecorator.flagExporter.reexportFlags(deps.ReexportedFlags...)
p.libraryDecorator.flagExporter.reexportDeps(deps.ReexportedDeps...)
p.libraryDecorator.flagExporter.addExportedGeneratedHeaders(deps.ReexportedGeneratedHeaders...)
p.libraryDecorator.flagExporter.setProvider(ctx)
// TODO(ccross): verify shared library dependencies
srcs := p.prebuiltSrcs(ctx)
if len(srcs) > 0 {
builderFlags := flagsToBuilderFlags(flags)
if len(srcs) > 1 {
ctx.PropertyErrorf("srcs", "multiple prebuilt source files")
return nil
}
p.libraryDecorator.exportVersioningMacroIfNeeded(ctx)
in := android.PathForModuleSrc(ctx, srcs[0])
if p.static() {
depSet := android.NewDepSetBuilder(android.TOPOLOGICAL).Direct(in).Build()
ctx.SetProvider(StaticLibraryInfoProvider, StaticLibraryInfo{
StaticLibrary: in,
TransitiveStaticLibrariesForOrdering: depSet,
})
return in
}
if p.shared() {
p.unstrippedOutputFile = in
libName := p.libraryDecorator.getLibName(ctx) + flags.Toolchain.ShlibSuffix()
outputFile := android.PathForModuleOut(ctx, libName)
var implicits android.Paths
if p.stripper.NeedsStrip(ctx) {
stripFlags := flagsToStripFlags(flags)
stripped := android.PathForModuleOut(ctx, "stripped", libName)
p.stripper.StripExecutableOrSharedLib(ctx, in, stripped, stripFlags)
in = stripped
}
// Optimize out relinking against shared libraries whose interface hasn't changed by
// depending on a table of contents file instead of the library itself.
tocFile := android.PathForModuleOut(ctx, libName+".toc")
p.tocFile = android.OptionalPathForPath(tocFile)
transformSharedObjectToToc(ctx, outputFile, tocFile, builderFlags)
if ctx.Windows() && p.properties.Windows_import_lib != nil {
				// Consumers of this library actually link to the import library at build
				// time and dynamically link to the DLL at run time, i.e.
// a.exe <-- static link --> foo.lib <-- dynamic link --> foo.dll
importLibSrc := android.PathForModuleSrc(ctx, String(p.properties.Windows_import_lib))
importLibName := p.libraryDecorator.getLibName(ctx) + ".lib"
importLibOutputFile := android.PathForModuleOut(ctx, importLibName)
implicits = append(implicits, importLibOutputFile)
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt import library",
Input: importLibSrc,
Output: importLibOutputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
}
ctx.Build(pctx, android.BuildParams{
Rule: android.Cp,
Description: "prebuilt shared library",
Implicits: implicits,
Input: in,
Output: outputFile,
Args: map[string]string{
"cpFlags": "-L",
},
})
ctx.SetProvider(SharedLibraryInfoProvider, SharedLibraryInfo{
SharedLibrary: outputFile,
UnstrippedSharedLibrary: p.unstrippedOutputFile,
Target: ctx.Target(),
TableOfContents: p.tocFile,
})
return outputFile
}
}
if p.header() {
ctx.SetProvider(HeaderLibraryInfoProvider, HeaderLibraryInfo{})
return nil
}
return nil
}
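// prebuiltSrcs returns the prebuilt source files for the current variant,
// including sanitizer-specific sources and the per-linkage (static or shared)
// srcs properties.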
func (p *prebuiltLibraryLinker) prebuiltSrcs(ctx android.BaseModuleContext) []string {
sanitize := ctx.Module().(*Module).sanitize
srcs := p.properties.Srcs
srcs = append(srcs, srcsForSanitizer(sanitize, p.properties.Sanitized)...)
if p.static() {
srcs = append(srcs, p.libraryDecorator.StaticProperties.Static.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.StaticProperties.Static.Sanitized)...)
}
if p.shared() {
srcs = append(srcs, p.libraryDecorator.SharedProperties.Shared.Srcs...)
srcs = append(srcs, srcsForSanitizer(sanitize, p.libraryDecorator.SharedProperties.Shared.Sanitized)...)
}
return srcs
}
func (p *prebuiltLibraryLinker) shared() bool {
return p.libraryDecorator.shared()
}
func (p *prebuiltLibraryLinker) nativeCoverage() bool {
return false
}
func (p *prebuiltLibraryLinker) disablePrebuilt() {
p.properties.Srcs = nil
}
// Implements versionedInterface
func (p *prebuiltLibraryLinker) implementationModuleName(name string) string {
return strings.TrimPrefix(name, "prebuilt_")
}
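// NewPrebuiltLibrary builds a cc library module backed by a
// prebuiltLibraryLinker: the prebuilt replaces the compiler, serves as both
// linker and library, and registers a source supplier so the prebuilt
// machinery can read the srcs property.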
func NewPrebuiltLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewLibrary(hod)
module.compiler = nil
prebuilt := &prebuiltLibraryLinker{
libraryDecorator: library,
}
module.linker = prebuilt
module.library = prebuilt
module.AddProperties(&prebuilt.properties)
srcsSupplier := func(ctx android.BaseModuleContext, _ android.Module) []string {
return prebuilt.prebuiltSrcs(ctx)
}
android.InitPrebuiltModuleWithSrcSupplier(module, srcsSupplier, "srcs")
// Prebuilt libraries can be used in SDKs.
android.InitSdkAwareModule(module)
return module, library
}
// cc_prebuilt_library installs a precompiled shared library, listed in the
// srcs property, into the device's directory.
func PrebuiltLibraryFactory() android.Module {
module, _ := NewPrebuiltLibrary(android.HostAndDeviceSupported)
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module.Init()
}
// cc_prebuilt_library_shared installs a precompiled shared library, listed in
// the srcs property, into the device's directory.
func PrebuiltSharedLibraryFactory() android.Module {
module, _ := NewPrebuiltSharedLibrary(android.HostAndDeviceSupported)
return module.Init()
}
// cc_prebuilt_test_library_shared installs a precompiled shared library
// to be used as a data dependency of a test-related module (such as cc_test, or
// cc_test_library).
func PrebuiltSharedTestLibraryFactory() android.Module {
module, library := NewPrebuiltLibrary(android.HostAndDeviceSupported)
library.BuildOnlyShared()
library.baseInstaller = NewTestInstaller()
return module.Init()
}
func NewPrebuiltSharedLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyShared()
// Prebuilt shared libraries can be included in APEXes
android.InitApexModule(module)
return module, library
}
// cc_prebuilt_library_static installs a precompiled static library, listed in
// the srcs property, into the device's directory.
func PrebuiltStaticLibraryFactory() android.Module {
module, _ := NewPrebuiltStaticLibrary(android.HostAndDeviceSupported)
return module.Init()
}
func NewPrebuiltStaticLibrary(hod android.HostOrDeviceSupported) (*Module, *libraryDecorator) {
module, library := NewPrebuiltLibrary(hod)
library.BuildOnlyStatic()
module.bazelHandler = &prebuiltStaticLibraryBazelHandler{module: module, library: library}
return module, library
}
type prebuiltObjectProperties struct {
Srcs []string `android:"path,arch_variant"`
}
type prebuiltObjectLinker struct {
android.Prebuilt
objectLinker
properties prebuiltObjectProperties
}
type prebuiltStaticLibraryBazelHandler struct {
bazelHandler
module *Module
library *libraryDecorator
}
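// generateBazelBuildActions queries the Bazel context for the CcInfo of the
// given label and checks that it provides at most one static library.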
func (h *prebuiltStaticLibraryBazelHandler) generateBazelBuildActions(ctx android.ModuleContext, label string) bool {
bazelCtx := ctx.Config().BazelContext
ccInfo, ok, err := bazelCtx.GetCcInfo(label, ctx.Arch().ArchType)
if err != nil {
ctx.ModuleErrorf("Error getting Bazel CcInfo: %s", err)
}
if !ok {
return false
}
staticLibs := ccInfo.CcStaticLibraryFiles
if len(staticLibs) > 1 {
ctx.ModuleErrorf("expected 1 static library from bazel target %q, got %s", label, staticLibs)
return false
}
	// TODO(b/184543518
handTrack1.py
, 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([160, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
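# --- Added sketch (not part of the original script) ---
# The H-S histogram returned by hand_histogram() is meant to drive
# cv2.calcBackProject, which is exactly what hist_masking() does below. A
# minimal illustration of that flow; `sample_bgr` is a hypothetical BGR frame
# captured while the hand covers the green calibration rectangles.
def _hand_histogram_usage_sketch(sample_bgr):
    draw_rect(sample_bgr.copy())                        # populate the sampling rectangles
    hist = hand_histogram(sample_bgr)                   # H-S histogram of the sampled skin
    hsv = cv2.cvtColor(sample_bgr, cv2.COLOR_BGR2HSV)
    # Probability map: bright where hue/saturation match the sampled skin tones.
    prob = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
    _, mask = cv2.threshold(prob, 150, 255, cv2.THRESH_BINARY)
    return mask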
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
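# --- Added note (not in the original) ---
# hist_masking() searches only a window of roughly `width` x `height` pixels
# centred on the last tracked hand position (bx, by), nudged back inside the
# frame when it would overflow. A compact clamp expressing the same idea:
def _clamp_search_window(cx_, cy_, win_w, win_h, frame_w, frame_h, margin=10):
    sx_ = max(margin, min(cx_ - win_w // 2, frame_w - margin - win_w))
    sy_ = max(margin, min(cy_ - win_h // 2, frame_h - margin - win_h))
    return sx_, sy_, sx_ + win_w, sy_ + win_h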
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
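# --- Added worked example (not in the original) ---
# centroid() uses the standard image-moment formulas cx = m10/m00 and
# cy = m01/m00. For a hypothetical 10x10 square contour the centre comes out
# at (5, 5):
def _centroid_moment_example():
    square = np.array([[[0, 0]], [[10, 0]], [[10, 10]], [[0, 10]]], dtype=np.int32)
    m = cv2.moments(square)
    return int(m['m10'] / m['m00']), int(m['m01'] / m['m00'])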
def farthest_point(defects, contour, centroid):
global bx, by
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
#cx, cy = centroid
# the centroid argument is not unpacked here; the tracked globals bx, by are used instead
x = np.array(contour[s][:, 0][:, 0], dtype=np.float64)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float64)
xp = cv2.pow(cv2.subtract(x, bx), 2)
yp = cv2.pow(cv2.subtract(y, by), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
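# --- Added sketch (not in the original) ---
# farthest_point() looks only at the convexity-defect start indices
# (defects[:, 0][:, 0]) and returns the contour point farthest from the tracked
# centre (bx, by); on a hand silhouette that is typically a fingertip. The core
# distance computation, restated on plain arrays:
def _farthest_from_centre(points, centre_x, centre_y):
    pts = np.asarray(points, dtype=np.float64)
    dist = np.sqrt((pts[:, 0] - centre_x) ** 2 + (pts[:, 1] - centre_y) ** 2)
    return tuple(pts[np.argmax(dist)].astype(int))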
def manage_image_opr(backFrame, frame, hand_hist):
global cx, cy, cropped, fWidth, fHeight
hist_mask_image = hist_masking(backFrame, frame, hand_hist)
contour_list = contours(frame, hist_mask_image)
max_cont = max_contour(frame, contour_list)
#cnt_centroid =
centroid(max_cont)
cnt_centroid = cx, cy
#cv2.circle(cropped, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)#cx, cy)#cnt_centroid)
#print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point)) #should be cnt_centroid
#cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
pointX = bx
pointY = by
height, width, __ = frame.shape
lowXBound = width*1/3
highXBound = width*2/3
lowYBound = height*1/3
highYBound = height*2/3
if (bx > lowXBound and bx < highXBound):
if (by> highYBound):
pyautogui.scroll(int((highYBound-by)/1))
elif (by < lowYBound):
pyautogui.scroll(int((lowYBound-by)/1))
elif (by > lowYBound and by < highYBound):
if (bx> highXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((highXBound-bx)/2))#('-')
pyautogui.keyUp('ctrl')
elif (bx < lowXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((lowXBound-bx)/2))#press('+')
pyautogui.keyUp('ctrl')
#coordinate = (str(far_point)).split()
#print (coordinate[0])
#print (coordinate([1]))
'''
print ("point x: ", pointX, "point y: ", pointY)
print(width, "width")
speed = int((fHeight/2 - by)/10)
#print("height", height)
print("scroll speed ", speed)
if (speed < -20 or speed > 20): #vertical region for scroll
speed/=5
pyautogui.scroll(speed) #scrolls faster depending on height
else:
speed = int((fWidth/2 - bx)/10)
print("zoom speed ", speed)
if (speed > 20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('+')
pyautogui.keyUp('ctrl')
if (speed < -20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('-')
pyautogui.keyUp('ctrl')
'''
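# --- Added summary sketch (not in the original) ---
# manage_image_opr() splits the frame into a 3x3 grid: the middle column maps
# to vertical scrolling and the middle row to ctrl+scroll zooming, with the
# amount proportional to how far (bx, by) sits beyond the relevant third.
# The same mapping as a pure function, for clarity:
def _gesture_for(px, py, frame_w, frame_h):
    low_x, high_x = frame_w / 3, 2 * frame_w / 3
    low_y, high_y = frame_h / 3, 2 * frame_h / 3
    if low_x < px < high_x and py > high_y:
        return 'scroll', int(high_y - py)        # negative -> scroll down
    if low_x < px < high_x and py < low_y:
        return 'scroll', int(low_y - py)         # positive -> scroll up
    if low_y < py < high_y and px > high_x:
        return 'zoom', int((high_x - px) / 2)    # negative -> zoom out
    if low_y < py < high_y and px < low_x:
        return 'zoom', int((low_x - px) / 2)     # positive -> zoom in
    return 'none', 0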
def plotHand():
|
global lengthColec
data = np.random.normal(0, 21, 100)
bins = np.arange(0, 21, 1)
plot.xlim([min(data)-.5, max(data)+.5])
plot.hist(data, bins=bins, alpha=0.5)
plot.title('metaData plot')
plot.xlabel('side lengths')
plot.ylabel('Number of occurrences')
plot.show()
|
identifier_body
|
|
handTrack1.py
|
py))
#print("Should be : " + str(bx) + ", "+ str(by))
#print("bad")
#cv2.circle(frame, bx,by, 10,[10,10,10]
#print("num matched: " +str(numMatch))
if numMatch >neededHands and area_cnt > 2000 and ((tempx-bx)**2 + (tempy-by)**2 < 150**2) :#and (areaHand == -1 or abs(area_cnt -areaHand) <2000) and moment['m00'] != 0 and ((tempx-bx)**2 + (tempy-by)**2 < 100**2):#area_cnt>min and area_cnt<max and len(defects)>=2:
handFound = True
#if numMatch > 0 :#and ((tempx-bx)**2 + (tempy-by)**2 < 100**2) and and len(approx)>=5 and len(approx) <=19 and len(defects) >=2
#area_cnt>max_area
#print("\n\n in \n\n")
cv2.drawContours(cropped, cnt, -1, (0,255,0), 3)
#cv2.drawContours(cropped, exMatch, -1, (255,0,255), 3)
newHands.append(cnt)
areaHand = area_cnt
bx = tempx
by = tempy
'''
if bx < int(width/2)+20:
bx = int(width/2)
if bx > fWidth-int(width/2):
bx = fWidth-int(width/2)
if by < int(height/2):
by = int(height/2)
if by > fHeight-int(height/2):
by = fHeight-int(height/2)
'''
#print("Center: " + str(bx) + ", "+ str(by))
max_area = area_cnt
max_i = i
#print("accepted area" + str(area_cnt))
#cv2.drawContours(frame, cnt, -1, (0,255,0), 3)
#peri = cv2.arcLength(cnt, True)
#approx = cv2.approxPolyDP(cnt, 0.01 * peri, True)
#print("Num of sides: " + str(len(approx)))
lengthColec.append(len(approx))
#maxDef =max(defects[0].depth, defects[1].depth)
#print("convexityDefectDepth: " + str(maxDef))
#defHands.appned(cnt)
else:
handFound = False
return None
if max_i != -1:
return contour_list[max_i]
return None
except:
if max_i != -1:
return contour_list[max_i]
return None
#print("cont area average? lol: " + str(max_area))
return contour_list[max_i]
def draw_rect(frame):
rows, cols, _ = frame.shape
global total_rectangle, hand_rect_one_x, hand_rect_one_y, hand_rect_two_x, hand_rect_two_y
hand_rect_one_x = np.array(
[6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 6 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 9 * rows / 30, 12 * rows / 30,
12 * rows / 30, 12 * rows / 30, 12 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30, 15 * rows / 30], dtype=np.uint32)
hand_rect_one_y = np.array(
[9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30 , 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30,
10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([160, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
def
|
farthest_point
|
identifier_name
|
|
handTrack1.py
|
2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
def farthest_point(defects, contour, centroid):
global bx, by
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
#cx, cy = centroid
# the centroid argument is not unpacked here; the tracked globals bx, by are used instead
x = np.array(contour[s][:, 0][:, 0], dtype=np.float64)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float64)
xp = cv2.pow(cv2.subtract(x, bx), 2)
yp = cv2.pow(cv2.subtract(y, by), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def manage_image_opr(backFrame, frame, hand_hist):
global cx, cy, cropped, fWidth, fHeight
hist_mask_image = hist_masking(backFrame, frame, hand_hist)
contour_list = contours(frame, hist_mask_image)
max_cont = max_contour(frame, contour_list)
#cnt_centroid =
centroid(max_cont)
cnt_centroid = cx, cy
#cv2.circle(cropped, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)#cx, cy)#cnt_centroid)
#print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point)) #should be cnt_centroid
#cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
pointX = bx
pointY = by
height, width, __ = frame.shape
lowXBound = width*1/3
highXBound = width*2/3
lowYBound = height*1/3
highYBound = height*2/3
if (bx > lowXBound and bx < highXBound):
if (by> highYBound):
pyautogui.scroll(int((highYBound-by)/1))
elif (by < lowYBound):
pyautogui.scroll(int((lowYBound-by)/1))
elif (by > lowYBound and by < highYBound):
if (bx> highXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((highXBound-bx)/2))#('-')
pyautogui.keyUp('ctrl')
elif (bx < lowXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((lowXBound-bx)/2))#press('+')
pyautogui.keyUp('ctrl')
#coordinate = (str(far_point)).split()
#print (coordinate[0])
#print (coordinate([1]))
'''
print ("point x: ", pointX, "point y: ", pointY)
print(width, "width")
speed = int((fHeight/2 - by)/10)
#print("height", height)
print("scroll speed ", speed)
if (speed < -20 or speed > 20): #vertical region for scroll
speed/=5
pyautogui.scroll(speed) #scrolls faster depending on height
else:
speed = int((fWidth/2 - bx)/10)
print("zoom speed ", speed)
if (speed > 20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('+')
pyautogui.keyUp('ctrl')
if (speed < -20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('-')
pyautogui.keyUp('ctrl')
'''
def plotHand():
global lengthColec
data = np.random.normal(0, 21, 100)
bins = np.arange(0, 21, 1)
plot.xlim([min(data)-.5, max(data)+.5])
plot.hist(data, bins=bins, alpha=0.5)
plot.title('metaData plot')
plot.xlabel('side lengths')
plot.ylabel('Number of occurrences')
plot.show()
def main():
global hand_hist, resetCount, handFound
#global background
#global have_background
global cx
global cy,bx,by
global defHands, newHands
is_hand_hist_created = False
capture = cv2.VideoCapture(0)
while capture.isOpened():
|
if resetCount <=0:
bx = int(fWidth/2)
by = int(fHeight/2)
#print("Reset data: " + str(bx) + " " + str(resetCount) + " " + str(by))
pressed_key = cv2.waitKey(1)
_, frame = capture.read()
if is_hand_hist_created==False:
txtColor = [0,255,0]
frame = cv2.flip(frame,1)
cv2.putText((frame), "Place hand over all green rectangles, then press \'z\'", (10,70),cv2.FONT_HERSHEY_SIMPLEX, .7,txtColor, 2)
frame = cv2.flip(frame,1)
if pressed_key & 0xFF == ord('z'):
#getBack(frame)
is_hand_hist_created = True
#frame = cutOutFace(frame)
hand_hist = hand_histogram(frame)
|
conditional_block
|
|
handTrack1.py
|
2)
hand_rect_one_y = np.array(
[9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30 , 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30,
10 * cols / 30, 11 * cols / 30, 12 * cols / 30, 9 * cols / 30, 10 * cols / 30, 11 * cols / 30, 12 * cols / 30], dtype=np.uint32)
hand_rect_two_x = hand_rect_one_x + 10
hand_rect_two_y = hand_rect_one_y + 10
for i in range(total_rectangle):
cv2.rectangle(frame, (hand_rect_one_y[i], hand_rect_one_x[i]),
(hand_rect_two_y[i], hand_rect_two_x[i]),
(0, 255, 0), 1)
return frame
def hand_histogram(frame):
global hand_rect_one_x, hand_rect_one_y
hsv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
roi = np.zeros([160, 10, 3], dtype=hsv_frame.dtype)
for i in range(total_rectangle):
roi[i * 10: i * 10 + 10, 0: 10] = hsv_frame[hand_rect_one_x[i]:hand_rect_one_x[i] + 10,
hand_rect_one_y[i]:hand_rect_one_y[i] + 10]
hand_hist = cv2.calcHist([roi], [0, 1], None, [180, 256], [0, 180, 0, 256])
return cv2.normalize(hand_hist, hand_hist, 0, 255, cv2.NORM_MINMAX)
def hist_masking(backFrame, frame, hist):
global fWidth, fHeight
global bx, by, areaHand, cropped, sx, sy, width, height
range = (int)((7000**(1/2)))
width = 2*int(range*1)
height = 2* int(range*1) * 1.5
sx = bx-int(width/2)
ex = bx + int(width/2)
sy = by-int(height/2)
ey = by + int(height/2)
if (sx < 0):
sx = 0+10
ex = width+10
bx = int((ex+sx)/2)
if (sy < 0):
sy = 0+10
ey = height+10
by = int((ey+sy)/2)
if (ex > fWidth):
ex = fWidth-10
sx = ex - width
bx = int((ex+sx)/2)
if (ey > fHeight):
ey = fHeight-10
sy = ey - height
by = int((ey+sy)/2)
sx = int(sx)
sy = int(sy)
ex = int(ex)
ey = int (ey)
start = (int(sx),int(sy))
end = (int(ex),int(ey))
colour = (100,100,100)
cv2.rectangle(frame, start, end, colour, 1)
if len(frame[sy:ey, sx : ex])>0:
cropped = frame[sy:ey, sx : ex]
hsv = cv2.cvtColor(cropped, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0, 1], hist, [0, 180, 0, 256], 1)
disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 20))
#disc = cv2.erode(disc, (5,5))
cv2.filter2D(dst, -1, disc, dst)
ret, thresh = cv2.threshold(dst, 150, 255, cv2.THRESH_BINARY)
# thresh = cv2.dilate(thresh, None, iterations=5)
thresh = cv2.merge((thresh, thresh, thresh))
return cv2.bitwise_and(cropped, thresh)
#return cv2.bitwise_and(cv2.bitwise_not(backFrame),cv2.bitwise_and(frame, thresh))
def centroid(max_contour):
global bx, by
moment = cv2.moments(max_contour)
if moment['m00'] != 0:
tempX = int(moment['m10'] / moment['m00'])
tempY = int(moment['m01'] / moment['m00'])
if ((bx-tempX)**2 + (by-tempY)**2 <= 100**2):
bx = tempX
by = tempY
#return cx, cy
#else:
#return None
def farthest_point(defects, contour, centroid):
global bx, by
if defects is not None and centroid is not None:
s = defects[:, 0][:, 0]
#cx, cy = centroid
# the centroid argument is not unpacked here; the tracked globals bx, by are used instead
x = np.array(contour[s][:, 0][:, 0], dtype=np.float64)
y = np.array(contour[s][:, 0][:, 1], dtype=np.float64)
xp = cv2.pow(cv2.subtract(x, bx), 2)
yp = cv2.pow(cv2.subtract(y, by), 2)
dist = cv2.sqrt(cv2.add(xp, yp))
dist_max_i = np.argmax(dist)
if dist_max_i < len(s):
farthest_defect = s[dist_max_i]
farthest_point = tuple(contour[farthest_defect][0])
return farthest_point
else:
return None
def manage_image_opr(backFrame, frame, hand_hist):
global cx, cy, cropped, fWidth, fHeight
hist_mask_image = hist_masking(backFrame, frame, hand_hist)
contour_list = contours(frame, hist_mask_image)
max_cont = max_contour(frame, contour_list)
#cnt_centroid =
centroid(max_cont)
cnt_centroid = cx, cy
#cv2.circle(cropped, cnt_centroid, 5, [255, 0, 255], -1)
if max_cont is not None:
hull = cv2.convexHull(max_cont, returnPoints=False)
defects = cv2.convexityDefects(max_cont, hull)
far_point = farthest_point(defects, max_cont, cnt_centroid)#cx, cy)#cnt_centroid)
#print("Centroid : " + str(cnt_centroid) + ", farthest Point : " + str(far_point)) #should be cnt_centroid
#cv2.circle(frame, far_point, 5, [0, 0, 255], -1)
pointX = bx
pointY = by
height, width, __ = frame.shape
lowXBound = width*1/3
highXBound = width*2/3
lowYBound = height*1/3
highYBound = height*2/3
if (bx > lowXBound and bx < highXBound):
if (by> highYBound):
pyautogui.scroll(int((highYBound-by)/1))
elif (by < lowYBound):
pyautogui.scroll(int((lowYBound-by)/1))
elif (by > lowYBound and by < highYBound):
if (bx> highXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((highXBound-bx)/2))#('-')
pyautogui.keyUp('ctrl')
elif (bx < lowXBound):
pyautogui.keyDown('ctrl')
pyautogui.scroll(int((lowXBound-bx)/2))#press('+')
pyautogui.keyUp('ctrl')
#coordinate = (str(far_point)).split()
#print (coordinate[0])
#print (coordinate([1]))
'''
print ("point x: ", pointX, "point y: ", pointY)
print(width, "width")
speed = int((fHeight/2 - by)/10)
#print("height", height)
print("scroll speed ", speed)
if (speed < -20 or speed > 20): #vertical region for scroll
speed/=5
pyautogui.scroll(speed) #scrolls faster depending on height
else:
speed = int((fWidth/2 - bx)/10)
print("zoom speed ", speed)
if (speed > 20):
speed/=5
pyautogui.keyDown('ctrl')
pyautogui.press('+')
pyautogui.keyUp('ctrl')
if (speed < -20):
speed/=5
|
random_line_split
|
||
histogramMatching.py
|
((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image);
#find the distance between two points p1 and p2
def
|
(point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
#matching algorithm to correlate current-frame blobs with existing blobs via their predicted positions
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 original, 5
addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance)
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print('area: ' + str(self.area) + ' Pos: ' + str(self.centerPositions))
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1):
self.predictedNextPosition[0] = self.centerPositions[-1][0]
self.predictedNextPosition[1] = self.centerPositions[-1][1]
elif (numPositions == 2):
deltaX = self.centerPositions[1][0] - self.centerPositions[0][0]
deltaY = self.centerPositions[1][1] - self.centerPositions[0][1]
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 3):
sumOfXChanges = ((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges)/3.0))
sumOfYChanges = ((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 3.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 4) :
sumOfXChanges = ((self.centerPositions[3][0] - self.centerPositions[2][0]) * 3) + \
((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 6.0))
sumOfYChanges = ((self.centerPositions[3][1] - self.centerPositions[2][1]) * 3) + \
((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 6.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1
|
distanceBetweenPoints
|
identifier_name
|
histogramMatching.py
|
((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
|
#find the distance between two points p1 and p2
def distanceBetweenPoints(point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
#matching algorithm to correlate current-frame blobs with existing blobs via their predicted positions
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 original, 5
addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance)
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print('area: ' + str(self.area) + ' Pos: ' + str(self.centerPositions))
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1):
self.predictedNextPosition[0] = self.centerPositions[-1][0]
self.predictedNextPosition[1] = self.centerPositions[-1][1]
elif (numPositions == 2):
deltaX = self.centerPositions[1][0] - self.centerPositions[0][0]
deltaY = self.centerPositions[1][1] - self.centerPositions[0][1]
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 3):
sumOfXChanges = ((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges)/3.0))
sumOfYChanges = ((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 3.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 4) :
sumOfXChanges = ((self.centerPositions[3][0] - self.centerPositions[2][0]) * 3) + \
((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 6.0))
sumOfYChanges = ((self.centerPositions[3][1] - self.centerPositions[2][1]) * 3) + \
((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 6.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1
|
image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image);
|
identifier_body
|
histogramMatching.py
|
#changing the colorspace to grayscale
imgFrame1Copy = cv2.cvtColor(imgFrame1Copy,cv2.COLOR_BGR2GRAY)
imgFrame2Copy = cv2.cvtColor(imgFrame2Copy,cv2.COLOR_BGR2GRAY)
#applying gaussianblur
imgFrame1Copy = cv2.GaussianBlur(imgFrame1Copy,(5,5),0)
imgFrame2Copy = cv2.GaussianBlur(imgFrame2Copy,(5,5),0)
#finding the difference of the two frames and thresholding the diff
imgDifference = cv2.absdiff(imgFrame1Copy,imgFrame2Copy)
_,imgThresh = cv2.threshold(imgDifference,30,255,cv2.THRESH_BINARY)
# cv2.imshow("imgThresh",imgThresh)
# morphological operations: dilation and erosion
kernel = np.ones((5,5),np.uint8)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.dilate(imgThresh,kernel,iterations = 1)
imgThresh = cv2.erode(imgThresh,kernel,iterations = 1)
#finding contours of the thresholded image
contours, hierarchy = cv2.findContours(imgThresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#finding and drawing convex hulls
hulls = [] #used to store hulls
for cnt in contours:
hulls.append(cv2.convexHull(cnt))
return hulls
#draws the rectangles on the motion detected object
def drawBlobInfoOnImage(blobs,imgFrame2Copy):
for i in range(len(blobs)):
if (blobs[i].blnStillBeingTracked == True):
rect_corner1 = (blobs[i].currentBoundingRect[0],blobs[i].currentBoundingRect[1])
rect_corner2 = (blobs[i].currentBoundingRect[0]+blobs[i].width, blobs[i].currentBoundingRect[1]+blobs[i].height)
# font settings
intFontFace = cv2.FONT_HERSHEY_SIMPLEX;
dblFontScale = blobs[i].dblCurrentDiagonalSize / 60.0
intFontThickness = int(round(dblFontScale * 1.0))
point = ((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image);
#find the distance between two points p1 and p2
def distanceBetweenPoints(point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
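# --- Added example (not in the original) ---
# distanceBetweenPoints is plain Euclidean distance; a 3-4-5 triangle check:
def _distance_example():
    assert distanceBetweenPoints((0, 0), (3, 4)) == 5.0
    return True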
#matching algorithm to correlate current-frame blobs with existing blobs via their predicted positions
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 original, 5
addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance)
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
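# --- Added sketch (not in the original) ---
# The matcher above attaches each current-frame blob to the tracked blob whose
# predicted position is nearest, but only when that distance is below 1.15x the
# current blob's diagonal; otherwise the blob starts a new track, and tracks
# unmatched for 5 consecutive frames are dropped. The association rule, in
# isolation and with hypothetical inputs:
def _associate(current_center, current_diagonal, predicted_positions):
    if not predicted_positions:
        return None
    dists = [distanceBetweenPoints(current_center, p) for p in predicted_positions]
    best = min(range(len(dists)), key=lambda k: dists[k])
    return best if dists[best] < current_diagonal * 1.15 else None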
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print('area: ' + str(self.area) + ' Pos: ' + str(self.centerPositions))
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
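# --- Added worked example (not in the original) ---
# Geometry derived in __init__: for a 30x40 bounding rect the diagonal is
# sqrt(30^2 + 40^2) = 50.0 and the aspect ratio is 30/40 = 0.75, so the
# matcher's 1.15x-diagonal gate would be 57.5 pixels for that blob.
def _blob_geometry_example():
    return math.sqrt(math.pow(30, 2) + math.pow(40, 2)), 30.0 / 40.0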
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1
|
def getDifferenceHulls(imgFrame1,imgFrame2):
#making duplicates of the above frames
imgFrame1Copy = imgFrame1.copy()
imgFrame2Copy = imgFrame2.copy()
|
random_line_split
|
|
histogramMatching.py
|
((rect_corner1[0]+rect_corner2[0])/2,(rect_corner1[1]+rect_corner2[1])/2)
# labels blob numbers
cv2.putText(imgFrame2Copy, str(i), blobs[i].centerPositions[-1], intFontFace, dblFontScale, (0,255,0), intFontThickness);
# draws box around the blob
cv2.rectangle(imgFrame2Copy, rect_corner1,rect_corner2, (0,0,255))
#draws the contours on the image
def drawAndShowContours(imageSize,contours,strImageName):
image = np.zeros(imageSize, dtype=np.uint8)
cv2.drawContours(image, contours, -1,(255,255,255), -1)
cv2.imshow(strImageName, image);
#draws the contours similar to the drawAndShowContours function
#but here the input provided is not the contours but object of class Blob
def drawAndShowBlobs(imageSize,blobs,strWindowsName):
image = np.zeros(imageSize, dtype=np.uint8)
contours = []
for blob in blobs:
if blob.blnStillBeingTracked == True:
contours.append(blob.currentContour)
cv2.drawContours(image, contours, -1,(255,255,255), -1);
cv2.imshow(strWindowsName, image);
#find the distance between two points p1 and p2
def distanceBetweenPoints(point1,point2):
intX = abs(point1[0] - point2[0])
intY = abs(point1[1] - point2[1])
return math.sqrt(math.pow(intX, 2) + math.pow(intY, 2))
#matching algorithm to correlate current-frame blobs with existing blobs via their predicted positions
def matchCurrentFrameBlobsToExistingBlobs(existingBlobs,currentFrameBlobs):
for existingBlob in existingBlobs:
existingBlob.blnCurrentMatchFoundOrNewBlob = False
existingBlob.predictNextPosition()
for currentFrameBlob in currentFrameBlobs:
intIndexOfLeastDistance = 0
dblLeastDistance = 100000.0
for i in range(len(existingBlobs)):
if (existingBlobs[i].blnStillBeingTracked == True):
dblDistance = distanceBetweenPoints(currentFrameBlob.centerPositions[-1], existingBlobs[i].predictedNextPosition)
# print dblDistance
if (dblDistance < dblLeastDistance):
dblLeastDistance = dblDistance
intIndexOfLeastDistance = i
if (dblLeastDistance < currentFrameBlob.dblCurrentDiagonalSize * 1.15): #1.15 original, 5
|
else:
addNewBlob(currentFrameBlob, existingBlobs)
for existingBlob in existingBlobs:
if (existingBlob.blnCurrentMatchFoundOrNewBlob == False):
existingBlob.intNumOfConsecutiveFramesWithoutAMatch +=1;
if (existingBlob.intNumOfConsecutiveFramesWithoutAMatch >= 5):
existingBlob.blnStillBeingTracked = False;
#adds the details of the matching blob to the existingBlob
def addBlobToExistingBlobs(currentFrameBlob,existingBlobs,i):
# print 'found continuos blob'
existingBlobs[i].noOfTimesAppeared += 1
existingBlobs[i].rois.append(currentFrameBlob.currentROI)
existingBlobs[i].featureMatches += currentFrameBlob.featureMatches
existingBlobs[i].noOfTimesAppeared += currentFrameBlob.noOfTimesAppeared
existingBlobs[i].currentContour = currentFrameBlob.currentContour;
existingBlobs[i].currentBoundingRect = currentFrameBlob.currentBoundingRect;
existingBlobs[i].centerPositions.append(currentFrameBlob.centerPositions[-1])
# if len(existingBlobs[i].centerPositions) > 30:
# del existingBlobs[i].centerPositions[0]
existingBlobs[i].dblCurrentDiagonalSize = currentFrameBlob.dblCurrentDiagonalSize;
existingBlobs[i].dblCurrentAspectRatio = currentFrameBlob.dblCurrentAspectRatio;
existingBlobs[i].blnStillBeingTracked = True;
existingBlobs[i].blnCurrentMatchFoundOrNewBlob = True;
#adds new blob to the list
def addNewBlob(currentFrameBlob,existingBlobs):
currentFrameBlob.blnCurrentMatchFoundOrNewBlob = True
existingBlobs.append(currentFrameBlob)
#CLASS
#class Blob consisting of variables and functions related to it
class Blob:
#functions
def printInfo(self):
print('area: ' + str(self.area) + ' Pos: ' + str(self.centerPositions))
def __init__(self, _contour,srcImage):
self.centerPositions = []
self.predictedNextPosition = [-1,-1]
self.currentContour = _contour
# mask = np.zeros(imgFrame2.shape, np.uint8)
# cv2.drawContours(mask, self.currentContour, -1, (255,255,255),1)
# roi = cv2.bitwise_and(imgFrame2,imgFrame2,mask=self.currentContour)
# cv2.imshow("roii",roi)
self.currentBoundingRect = cv2.boundingRect(self.currentContour) #x,y,w,h
x = (self.currentBoundingRect[0] + self.currentBoundingRect[0] + self.currentBoundingRect[2])/2
y = (self.currentBoundingRect[1] + self.currentBoundingRect[1] + self.currentBoundingRect[3]) / 2
self.currentCenter = (x,y)
self.width = self.currentBoundingRect[2]
self.height = self.currentBoundingRect[3]
self.area = self.currentBoundingRect[2] * self.currentBoundingRect[3]
self.centerPositions.append(self.currentCenter)
self.dblCurrentDiagonalSize = math.sqrt(math.pow(self.currentBoundingRect[2], 2) + math.pow(self.currentBoundingRect[3], 2));
self.dblCurrentAspectRatio = float(self.currentBoundingRect[2])/float(self.currentBoundingRect[3])
x,y,w,h = self.currentBoundingRect #x,y,w,h
self.currentROI = srcImage[y:y+h, x:x+w]
self.rois = []
self.noOfTimesAppeared = 1
self.featureMatches = 0
# flags
self.blnStillBeingTracked = True;
self.blnCurrentMatchFoundOrNewBlob = True;
self.intNumOfConsecutiveFramesWithoutAMatch = 0;
def predictNextPosition(self):
numPositions = len(self.centerPositions)
if (numPositions == 1):
self.predictedNextPosition[0] = self.centerPositions[-1][0]
self.predictedNextPosition[1] = self.centerPositions[-1][1]
elif (numPositions == 2):
deltaX = self.centerPositions[1][0] - self.centerPositions[0][0]
deltaY = self.centerPositions[1][1] - self.centerPositions[0][1]
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 3):
sumOfXChanges = ((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges)/3.0))
sumOfYChanges = ((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 3.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX
self.predictedNextPosition[1] = self.centerPositions[-1][1] + deltaY
elif (numPositions == 4) :
sumOfXChanges = ((self.centerPositions[3][0] - self.centerPositions[2][0]) * 3) + \
((self.centerPositions[2][0] - self.centerPositions[1][0]) * 2) + \
((self.centerPositions[1][0] - self.centerPositions[0][0]) * 1)
deltaX = int(round(float(sumOfXChanges) / 6.0))
sumOfYChanges = ((self.centerPositions[3][1] - self.centerPositions[2][1]) * 3) + \
((self.centerPositions[2][1] - self.centerPositions[1][1]) * 2) + \
((self.centerPositions[1][1] - self.centerPositions[0][1]) * 1)
deltaY = int(round(float(sumOfYChanges) / 6.0))
self.predictedNextPosition[0] = self.centerPositions[-1][0] + deltaX;
self.predictedNextPosition[1
|
addBlobToExistingBlobs(currentFrameBlob, existingBlobs, intIndexOfLeastDistance)
|
conditional_block
|
chat.js
|
}
}
function chat_show()
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
chat_resize();
post_center();
}
function chat_hide()
{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
chat_resize();
post_center();
}
function chat_isopen()
{
if(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function chatc_create(users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
0 - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var lo
|
{
return unescape(y);
}
|
conditional_block
|
|
chat.js
|
function chat_isopen()
{
if(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function chatc_create(users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
any other value - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var loe = "";
cd.lines.sort(function(a,b) { return parseFloat(a.time) - parseFloat(b.time) } );
i=0;
if(cadd && chat_lastlineid == cd.lines[0].id) i = 1;
for(; i<linecount; i++)
{
clines = "<p>" + cd
|
{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
chat_resize();
post_center();
}
|
identifier_body
|
|
chat.js
|
(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function
|
(users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
any other value - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var loe = "";
cd.lines.sort(function(a,b) { return parseFloat(a.time) - parseFloat(b.time) } );
i=0;
if(cadd && chat_lastlineid == cd.lines[0].id) i = 1;
for(; i<linecount; i++)
{
clines = "<p>" + cd.lines[i].line + "</p>";
if(cd.lines[i].tid % 2)
loe = "chatlineodd";
else
loe = "chatlineeven";
ltext += "<div class='chatbox_ci " + lo
|
chatc_create
|
identifier_name
|
chat.js
|
chat_resize();
post_center();
}
function chat_isopen()
{
if(getCookie("cards_chatbox") == "1")
return 1;
else
return 0;
}
function showhide_chat(item)
{
if($(item).style.visibility != 'visible')
{
$(item).style.visibility = 'visible';
$("chatntf").style.right = '310px';
setCookie("cards_chatbox", "1", 365);
}else{
$(item).style.visibility = 'hidden';
$("chatntf").style.right = '10px';
setCookie("cards_chatbox", "0", 365);
}
chat_resize();
post_center();
}
function chat_resize()
{
$("chatcontent").style.height = g_win_height - ($("cboxtop").offsetHeight + 30) + 'px';
var pha_content = $('pha_content');
if(pha_content)
{
if(chat_isopen())
pha_content.style.width = (g_win_width - 330) + 'px';
else
pha_content.style.width = (g_win_width - 1) + 'px';
}
var ct = $("chattext");
ct.scrollTop = ct.scrollHeight;
}
//window.onresize = chat_resize;
function chat_init()
{
$("chattextinput").onkeyup = function(e)
{
e = e || event;
if (e.keyCode === 13 && !e.shiftKey) {
chat_sendmsg();
}
return true;
}
chat_reload();
if(getCookie("cards_chatbox") == "1")
{
$("chatbox").style.visibility = 'visible';
$("chatntf").style.right = '310px';
chat_resize();
if(chat_croomid)
{
chatc_viewex(chat_croomid, chat_croomsign, 0);
}
}else{
$("chatbox").style.visibility = 'hidden';
$("chatntf").style.right = '10px';
}
}
/* conversation management */
function chatc_clear()
{
var o = $('chatntf');
o.innerHTML = "";
o.setAttribute('data-ccount', 0);
}
function chatc_create(users, userav, newcount, rid, csignature, usersloc)
{
var o = $('chatntf');
/* offline available busy away */
var avm = ["555555", "99cc66", "ff6633", "ffcc00"];
var nbv = "";
var ulist = "";
var maxut = o.getAttribute('data-maxt');
var cct = o.getAttribute('data-ccount');
var ppt = "";
cct = parseInt(cct) + 1;
if(users.length > maxut)
ppt = "<div class='chatntf_plus'></div>";
if(users.length < maxut)
maxut = users.length;
for(var i=0; i<maxut; i++)
{
//ulist += "<a href='#'><div class='chatntf_pic' onmouseover='vusr(this, \"" + users[i] + "\")' style=\"background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
if(usersloc[i] != 0) /* image available */
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('data/u" + usersloc[i] + "/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
else
ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer; background: url('images/failsafe/dp/2.jpg')\"><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
//ulist += "<a><div class='chatntf_pic' style=\"cursor: pointer;\"><img src='data/u" + usersloc[i] + "/dp/2.jpg' onerror='failsafe_img(this, 2);'/><div class=\"chatntf_availability\" style=\"background: #" + avm[userav[i]] + "\"></div></div></a>";
}
if(newcount) nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "'>" + newcount + "</div>";
else nbv = "<div class='chatntf_new' id='chatntfcidnew" + cct + "' style='visibility: hidden;'>" + newcount + "</div>";
var ct = "<div class=\"chatntf_box\" onclick='chatc_view(\"" + rid + "\", \"" + csignature + "\")' data-rid='" + rid + "' data-cs='" + csignature + "' id='chatntfcid" + cct + "'><div class=\"chatntf_pic_st\"></div>" +
ulist + nbv + "<div class='chatntf_x' onclick=\"javascript: chatc_close('" + cct + "');\"></div>" + ppt + "</div><div style='clear:both;'></div>";
o.innerHTML += ct;
o.setAttribute('data-ccount', cct);
}
function chatc_createtest(nc)
{
var a = new Array('uqkhjYh', 'uvkhjYh', 'uqkhjYh', 'uvkhjYh');
var av = new Array(2, 1, 3, 0);
chatc_create(a, av, nc);
}
function chatc_close(cid)
{
$('chatntfcid' + cid).style.display = "none";
}
function chatc_show(cid)
{
}
/* timer call for chat */
function chat_timercall()
{
var cct = $("chatntf").getAttribute('data-ccount');
if(cct == 0)
chat_switchicon(1);
else
chat_switchicon(0);
}
/*
1 - show
any other value - hide
*/
function chat_switchicon(mode)
{
if(mode == 1)
{
$("sideicon_chat").style.display = "inline";
$("chatntf").style.top = "34px";
}else{
$("sideicon_chat").style.display = "none";
$("chatntf").style.top = "0px";
}
}
function chatc_call(uid)
{
ajax_post("php/tasks/chatcall.php?u=" + uid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
alert(xmlhttp.responseText);
}
}});
}
function chatc_view(rid, cs)
{
return chatc_viewex(rid, cs, 1);
}
function chatc_viewex(rid, cs, cpanelset)
{
if(cpanelset)
{
if(!chat_isopen())
{
chat_show();
}else{
if(rid == chat_croomid)
chat_hide();
}
}
chat_croomid = rid;
chat_croomsign = cs;
ajax_post("php/tasks/chatget.php?r=" + rid, function(){
if (xmlhttp.readyState==4 && xmlhttp.status==200)
{
if(xmlhttp.responseText != "")
{
var jm = {};
jm = JSON.parse(xmlhttp.responseText);
chatv_refresh(jm, 0);
}
}});
}
function chatv_refresh(cd, cadd)
{
var uftext = "";
var ustext = "";
var i =0;
var usercount = cd.users.length;
for(i=0; i<usercount; i++)
{
if(i) {uftext += ", "; ustext += ", ";}
uftext += cd.users[i].name;
if(i < 2)
ustext += "<a href='u" + cd.users[i].id + "' onmouseover='vusr(this, \"" + cd.users[i].id + "\")'>" + cd.users[i].name + "</a>";
}
if(usercount > 2)
{
ustext += " <span title='" + uftext + "'>(+" + (usercount - 2) + ")</span>";
}
$("chatbox_cusers").innerHTML = ustext;
/* make the lines */
var linecount = cd.lines.length;
var ltext = "";
var clines = "";
var loe = "";
cd.lines.sort(function(a,b) { return parseFloat(a.time) - parseFloat(b.time) } );
i=0;
if(cadd && chat_lastlineid == cd.lines[0].id) i = 1;
for(; i<linecount; i++)
{
clines = "<p>" + cd.lines[i].line + "</p>";
if(cd.lines[i].tid % 2)
loe = "chat
|
setCookie("cards_chatbox", "0", 365);
|
random_line_split
|
|
canvas_play.js
|
c.beginPath();
c.moveTo(x,y+radius);
c.lineTo(x,y+height-radius);
c.quadraticCurveTo(x,y+height,x+radius,y+height);
if (t) {
c.lineTo(x+width/2-4,y+height);
c.lineTo(x+width/2,y+height+7);
c.lineTo(x+width/2+4,y+height);
}
c.lineTo(x+width-radius,y+height);
c.quadraticCurveTo(x+width,y+height,x+width,y+height-radius);
c.lineTo(x+width,y+radius);
c.quadraticCurveTo(x+width,y,x+width-radius,y);
c.lineTo(x+radius,y);
c.quadraticCurveTo(x,y,x,y+radius);
c.stroke();
}
deleteMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
if (i == 9) {
this.msg.pop();
link.killed = true;
} else link.sprite[0] = link.width * i;
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.deleteMsg(this.msg[this.msg.length - 1]);
const m = {}
m.text = game.serveText(msg, this.size + this.customTextSize);
m.sprite = [];
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.deleteMsg(m);
}, 10000);
}
renderMsg(m) {
/*
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
console.time('draw')
const tmp = game.layer[3],
k = 0.8 + 0.2 * this.size + this.customTextSize,
interval = 12 * k,
rectHeight = m.text.length * interval + 4 * k,
rectWidth = m.text[0].length * 9 * k,
height = rectHeight + 4,
width = rectWidth + 4,
canvasHeight = height * 2 + 7,
last = width * 6;
console.log(m.text)
tmp.canvas.height = canvasHeight;
tmp.canvas.width = last;
for (let i = 0; i < 2; i++) {
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp, 2, (i ? 2 : 9 + height), rectWidth, rectHeight, 5, i);
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b, 5, 13 * k + interval * j + (i ? 0 : 7 + height), rectWidth));
}
for (let i = width, alpha = 0.7; i < last; i += width, alpha *= 0.6) {
const result = tmp.getImageData(0, 0, width, canvasHeight),
r = result.data;
for (let j = 3; j < r.length; j += 4) {
r[j] *= alpha;
}
tmp.putImageData(result, i, 0);
}
console.timeEnd('draw')
return tmp.canvas;
}
/*
extMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
console.log(link.alpha + ': ' + link.text[0]);
if (i == 9) {
this.msg.pop();
link.killed = true;
} else {
link.alpha = 1.0 - i * i / 81;
this.shiftAlphaMsg(link);
}
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.extMsg(this.msg[this.msg.length-1]);
const m = {}
//m.alpha = 1.0;
m.text = game.serveText(msg);
this.drawMsg(m, 1);
if (this.msg[0]) this.drawMsg(this.msg[0]);
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.extMsg(m);
}, 10000);
//this.drawMsg();
} */ /*
shiftAlphaMsg(m) {
const tmp = game.layer[3];
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.globalAlpha = m.alpha;
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
m.image = image;
stopRendering = false;
}
} */
/*
drawMsg(m, newMsg) {
console.log(m)
if (newMsg) m.k = 0.8 + 0.2 * this.size + this.customTextSize;
const tmp = game.layer[3],
interval = 12 * m.k;
if (newMsg) {
m.rectHeight = m.text.length * interval + 3,
m.rectWidth = m.text[0].length * 9 * m.k;
}
m.height = m.rectHeight + (newMsg ? 11 : 4);
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,m.rectWidth,m.rectHeight,5,(newMsg ? true : false));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
/*
drawMsg() {
if (this.msg.length == 0) {
delete this.paintedMsg;
stopRendering = false;
return;
}
console.time('draw')
const k = 0.8 + 0.2 * this.size + this.customTextSize,
tmp = game.layer[3];
let interval = 12 * k,
s = 0,
largestWidth = 14;
this.msg.forEach((a, i) => {
const rectHeight = a.text.length * interval + 3,
rectWidth = a.text[0].length * 9 * k;
a.height = rectHeight + 4 + (i ? 0 : 7);
s += a.height;
tmp.canvas.height = a.height;
tmp.canvas.width = 4 + rectWidth;
if (tmp.canvas.width > largestWidth) largestWidth = tmp.canvas.width;
tmp.globalAlpha = a.alpha;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,rectWidth,rectHeight,5,(i ? undefined : true));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
a.text.forEach((b, j) => tmp.fillText(b,5,13*k+interval*j));
a.image = tmp.getImageData(0,0,4 + rectWidth,a.height);
});
tmp.canvas.height = s;
tmp.canvas.width = largestWidth;
for (let i = 0; i < this.msg.length; i++) {
s -= this.msg[i].height;
tmp.putImageData(this.msg[i].image,(largestWidth-this.msg[i].image.width)/2,s);
}
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
//resolve(image);
this.paintedMsg = image;
stopRendering = false;
//context.drawImage(image, X, Y - image.height);
console.timeEnd('draw')
};
//return image;
}
*/
}
class Cat extends Animal {
constructor(raw = {}) {
super();
this.id = raw.id;
this.sprite = 0;
this.speed = raw.speed;
this.place = raw.place;
this.size = raw.size;
this.h = 140;
this.w = 220;
this.out = Math.floor(this.w / 2 * this.size);
this.wsc = Math.floor(this.w * this.size);
|
this.hsc = Math.floor(this.h * this.size);
this.customTextSize = 0;
this.dir = raw.dir;
//assign a default picture first, then load the new one
this.skin = new Image();
|
random_line_split
|
|
canvas_play.js
|
(data, f = a => a) {
data.react = f;
this.spaces.add(data);
return data;
}
deleteSpace(link) {
return this.spaces.delete(link);
}
computeVector(A, B) {
return [
B[0] - A[0],
B[1] - A[1],
Math.round(Math.sqrt(Math.pow(B[0] - A[0], 2) + Math.pow(B[1] - A[1], 2)))
];
}
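// Illustrative note (added, not in the original source): computeVector returns the x/y
// deltas plus the rounded Euclidean length, e.g. computeVector([0, 0], [3, 4]) -> [3, 4, 5];
// Cat.walk() further down uses the third component as the total path length to cover.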
serveText(text, maxTextLength = 1) {
maxTextLength = Math.floor(maxTextLength * 40);
const result = [''], max = Math.floor(maxTextLength / 2);
text = text.match(/\s*[\S]+\s*/g);
for (let i = 0; i < text.length; i++) {
const s = text[i];
if (s.length > max) {
text.splice(i + 1, 0, s.slice(0, max), s.slice(max + 1));
continue;
}
if (result[result.length - 1].length + s.length > maxTextLength) {
result.push(s);
} else result[result.length - 1] += s;
}
return result;
}
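// Illustrative note (added, not in the original source): serveText wraps a message into an
// array of lines no longer than maxTextLength * 40 characters, splitting any token longer
// than half that limit, so a size factor of 1 caps speech-bubble lines at roughly 40 characters.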
initCanvas() {
w = document.documentElement.clientWidth * this.scale;
h = document.documentElement.clientHeight * this.scale;
let zero = this.layer[0],
first = this.layer[1],
under = this.layer[2],
totalHight = h * 0.85; //0.3 + 0.55
zero.canvas.width = w;
zero.canvas.height = totalHight;
first.canvas.width = w;
first.canvas.height = totalHight;
under.canvas.width = w;
under.canvas.height = totalHight;
this.area.range = [0,h*0.55,w,totalHight]
x = w / 160;
y = h * 0.3 / 27;
}
async render() {
console.time('render')
if (stopRendering) return requestAnimationFrame(game.render);
//when game.scale changes:
//1) redraw the under-layer
//2) recalculate the parameters
const l = game.layer[game.nowl],
areaHight = h * 0.55;
game.all.sort((a,b) => a.place[1] - b.place[1]);
game.all.forEach(i => {
const placeX = Math.floor(i.place[0] * x),
placeY = Math.floor(areaHight + i.place[1] * y);
l.drawImage(i.skin, i.sprite, i.dir ? 0 : i.h,
i.w, i.h, placeX - i.out,
placeY - i.hsc,
i.wsc, i.hsc);
// if (i.paintedMsg)
// l.drawImage(i.paintedMsg, Math.floor(i.place[0] * x - i.paintedMsg.width/2/*- i.out*/),
// Math.floor(areaHight + i.place[1] * y - i.hsc - i.paintedMsg.height));
if (i.msg) {
for (let j = 0, p = 0; j < i.msg.length; j++) {
console.log(i.msg[j])
l.drawImage(i.msg[j].image, placeX, placeY - i.hsc - p);
p += i.msg[j].height;
}
}
});
game.nowl ^= 1;
game.layer[game.nowl].clearRect(0, 0, w, h);
requestAnimationFrame(game.render);
stopRendering = true;
console.timeEnd('render')
}
space(s = {}, X, Y) {
switch (s.type) {
case 0:
s = s.range;
if (X >= s[0] && X <= s[2] &&
Y >= s[1] && Y <= s[3]) return true;
break;
}
}
openConnection() {
//if a connection already exists, try to close the connection
if (ws) ws.close();
ws = new WebSocket(this.host, 'play');
ws.onopen = () => {
get('/getCookie').then(res => {
if (res.code == 1) send(100, res.headers)
else console.log('Authorization error');
});
}
ws.onmessage = e => {
const {code, msg} = JSON.parse(e.data);
console.log({code,msg});
switch (code) {
case 100:
game.time.updateTime(msg.time);
game.location = new Location(msg.loc);
ID = msg.id;
requestAnimationFrame(game.render);
break;
case 101:
game.cats.get(msg.id).addMsg(msg.text);
break;
case 104:
game.cats.get(msg.id).walk(msg.msg);
break;
case 105:
game.location.clear();
game.location.fill(msg.fill);
break;
case 107:
if (game.cats.get(msg.id)) return;
new Cat(msg);
break;
case 108:
game.time.updateTime(msg);
break;
}
}
}
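// Illustrative note (added, not in the original source): server frames are JSON objects of
// the form {code, msg}; e.g. code 100 initialises time, location and the player id, code 101
// routes a chat line to the matching Cat via addMsg, and code 104 starts a walk to msg.msg.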
}
var game = new Game();
class Animal {
constructor(raw = {}) {
this.msg = [];
}
roundedRect(c, x, y, width, height, radius, t) {
c.beginPath();
c.moveTo(x,y+radius);
c.lineTo(x,y+height-radius);
c.quadraticCurveTo(x,y+height,x+radius,y+height);
if (t) {
c.lineTo(x+width/2-4,y+height);
c.lineTo(x+width/2,y+height+7);
c.lineTo(x+width/2+4,y+height);
}
c.lineTo(x+width-radius,y+height);
c.quadraticCurveTo(x+width,y+height,x+width,y+height-radius);
c.lineTo(x+width,y+radius);
c.quadraticCurveTo(x+width,y,x+width-radius,y);
c.lineTo(x+radius,y);
c.quadraticCurveTo(x,y,x,y+radius);
c.stroke();
}
deleteMsg(link) {
for (let i = 1; i < 10; i++) {
setTimeout(() => {
if (i == 9) {
this.msg.pop();
link.killed = true;
} else link.sprite[0] = link.width * i;
}, i * 100);
}
}
addMsg(msg) {
if (this.msg.length > 2) this.deleteMsg(this.msg[this.msg.length - 1]);
const m = {}
m.text = game.serveText(msg, this.size + this.customTextSize);
m.sprite = [];
this.msg.unshift(m);
setTimeout(() => {
if (m.killed) return;
this.deleteMsg(m);
}, 10000);
}
renderMsg(m) {
/*
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = docume
|
this.roundedRect(tmp, 2, (i ? 2 : 9 + height), rectWidth, rectHeight, 5, i);
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b, 5, 13 * k + interval * j + (i ? 0 : 7 + height), rectWidth));
}
for (let i = width, alpha = 0.7; i < last; i += width, alpha *= 0.6) {
const result = tmp.getImageData(0, 0, width, canvasHeight),
r = result.data;
for (let j = 3; j < r.length; j += 4) {
r[j] *= alpha;
}
tmp.putImageData(result, i, 0);
}
console.timeEnd('draw')
return tmp.canvas;
|
nt.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
console.time('draw')
const tmp = game.layer[3],
k = 0.8 + 0.2 * this.size + this.customTextSize,
interval = 12 * k,
rectHeight = m.text.length * interval + 4 * k,
rectWidth = m.text[0].length * 9 * k,
height = rectHeight + 4,
width = rectWidth + 4,
canvasHeight = height * 2 + 7,
last = width * 6;
console.log(m.text)
tmp.canvas.height = canvasHeight;
tmp.canvas.width = last;
for (let i = 0; i < 2; i++) {
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
|
identifier_body
|
canvas_play.js
|
11 : 4);
tmp.canvas.height = m.height;
tmp.canvas.width = 4 + m.rectWidth;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,m.rectWidth,m.rectHeight,5,(newMsg ? true : false));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * m.k}px monospace`;
m.text.forEach((b, j) => tmp.fillText(b,5,13*m.k+interval*j));
tmp.getImageData(0,0,4+m.rectWidth,m.height);
m.image = document.createElement('img');
m.image.src = tmp.canvas.toDataURL();
m.image.onload = () => stopRendering = false;
} */
/*
drawMsg() {
if (this.msg.length == 0) {
delete this.paintedMsg;
stopRendering = false;
return;
}
console.time('draw')
const k = 0.8 + 0.2 * this.size + this.customTextSize,
tmp = game.layer[3];
let interval = 12 * k,
s = 0,
largestWidth = 14;
this.msg.forEach((a, i) => {
const rectHeight = a.text.length * interval + 3,
rectWidth = a.text[0].length * 9 * k;
a.height = rectHeight + 4 + (i ? 0 : 7);
s += a.height;
tmp.canvas.height = a.height;
tmp.canvas.width = 4 + rectWidth;
if (tmp.canvas.width > largestWidth) largestWidth = tmp.canvas.width;
tmp.globalAlpha = a.alpha;
tmp.fillStyle = game.colorOfInterface.main;
tmp.lineWidth = 3;
this.roundedRect(tmp,2,2,rectWidth,rectHeight,5,(i ? undefined : true));
tmp.fill();
tmp.fillStyle = game.colorOfInterface.text;
tmp.font = `${14 * k}px monospace`;
a.text.forEach((b, j) => tmp.fillText(b,5,13*k+interval*j));
a.image = tmp.getImageData(0,0,4 + rectWidth,a.height);
});
tmp.canvas.height = s;
tmp.canvas.width = largestWidth;
for (let i = 0; i < this.msg.length; i++) {
s -= this.msg[i].height;
tmp.putImageData(this.msg[i].image,(largestWidth-this.msg[i].image.width)/2,s);
}
const image = document.createElement('img');
image.src = tmp.canvas.toDataURL();
image.onload = () => {
//resolve(image);
this.paintedMsg = image;
stopRendering = false;
//context.drawImage(image, X, Y - image.height);
console.timeEnd('draw')
};
//return image;
}
*/
}
class Cat extends Animal {
constructor(raw = {}) {
super();
this.id = raw.id;
this.sprite = 0;
this.speed = raw.speed;
this.place = raw.place;
this.size = raw.size;
this.h = 140;
this.w = 220;
this.out = Math.floor(this.w / 2 * this.size);
this.wsc = Math.floor(this.w * this.size);
this.hsc = Math.floor(this.h * this.size);
this.customTextSize = 0;
this.dir = raw.dir;
//assign a default picture first, then load the new one
this.skin = new Image();
loadImage(`/img/players?r=${raw.skin}`, async a => {
this.skin = await this.combineSkin(a);
stopRendering = false;
});
game.cats.set(raw.id, this);
game.all.push(this);
}
delete() {
const i = game.all.findIndex(a => a == this);
if (i != -1) game.all.splice(i, 1);
game.cats.delete(this.id);
}
combineSkin(img, bits = 0) {
return new Promise(resolve => {
let image = document.createElement('img');
const raw = document.createElement('canvas').getContext('2d'),
result = document.createElement('canvas').getContext('2d'),
tmp = document.createElement('canvas').getContext('2d'),
end = () => {
image = document.createElement('img');
image.src = result.canvas.toDataURL('image/png');
image.onload = () => resolve(image);
};
result.canvas.width = 2640;
result.canvas.height = 280;
raw.canvas.width = 2640;
raw.canvas.height = 700;
raw.drawImage(img,0,0);
let body = raw.getImageData(0,0,2640,140);
result.putImageData(body,0,0);
for (let i = 0; i <= 2420; i += 220)
result.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
tmp.canvas.width = 2640;
tmp.canvas.height = 280;
tmp.putImageData(raw.getImageData(0, 140, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220) {
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,
(bits & 1 ? 280 : 140),220,140)),i,140);
}
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
if (bits & 2) {
tmp.clearRect(0,0,2640,280);
tmp.putImageData(raw.getImageData(0, 320, 2640, 140),0,0);
for (let i = 0; i <= 2420; i += 220)
tmp.putImageData(game.canvas.rotateHorizontally(raw.getImageData(i,0,220,140)), i, 140);
image = document.createElement('img');
image.src = tmp.canvas.toDataURL('image/png');
image.onload = () => {
result.drawImage(image,0,0);
end();
}
} else end();
}
});
}
walkAnimation() {
if (this.walkAnimationInterval) return;
this.sprite = 880;
this.walkAnimationInterval = setInterval(() => {
if (this.sprite >= 2420) this.sprite = 880
else this.sprite += 220;
}, 120);
}
stopWalkAnimation() {
this.sprite = 0;
clearInterval(this.walkAnimationInterval);
delete this.walkAnimationInterval;
}
walk(to) {
clearInterval(this.walkInterval);
this.walkAnimation();
const v = game.computeVector(this.place, to),
speed = this.speed / 1000 * 40,
t = v[2] / speed,
speedX = v[0] / t, speedY = v[1] / t;
let gone = 0;
if (v[0] < 0) this.dir = 0
else this.dir = 1;
this.walkInterval = setInterval(() => {
stopRendering = false;
gone += speed;
if (v[2] <= gone) {
this.stopWalk();
this.stopWalkAnimation();
return;
}
this.place[0] += speedX;
this.place[1] += speedY;
}, 40);
}
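// Illustrative note (added, not in the original source): with a hypothetical speed of 50 the
// cat advances 50 / 1000 * 40 = 2 units per 40 ms tick, so a vector of length v[2] = 10 is
// covered in five ticks (200 ms) before stopWalk() and stopWalkAnimation() are called.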
stopWalk() {
clearInterval(this.walkInterval)
}
}
class Location {
constructor(raw = {}) {
this.area = new Image();
loadImage(`/img/area?r=${raw.area}`, img => {
this.area = img;
this.drawArea();
});
this.fill(raw.fill);
}
fill(raw = []) {
raw.forEach(a => new Cat(a));
}
drawArea() {
const l = game.layer[2],
p = l.createPattern(this.area, 'repeat');
l.fillStyle = p;
l.fillRect(0, h * 0.55, w, h);
}
clear() {
game.cats.forEach(cat => {
cat.delete();
});
stopRendering = false;
}
}
async function get(url, options = {}) {
const res = await fetch(url, options);
if (options.text) return await res.text()
else return await res.json();
}
function send(code, msg) {
if (ws.readyState === WebSocket.OPEN) {
ws.send(JSON.stringify({ code, msg }));
return true;
}
}
function loadImage(path, f = a => a) {
const img = new Image();
img.src = path;
img.onload = () => f(img);
}
/*
game.computeVectorsStart = msg => {
if (msg.t - Date.now() <= 0) return msg.to;
const full = game.computeVector(msg.from, msg.to),
larger = Math.abs(Math.abs(full[0]) > Math
|
.abs(full
|
identifier_name
|
|
s_expressions.rs
|
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum
|
{
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
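// For illustration (added, not in the original source): `ast_node!(Atom, ATOM)` expands to
// roughly the following newtype plus a checked-downcast constructor:
//
// struct Atom(SyntaxNode);
// impl Atom {
//     fn cast(node: SyntaxNode) -> Option<Self> {
//         if node.kind() == ATOM { Some(Self(node)) } else { None }
//     }
// }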
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = S
|
SexpRes
|
identifier_name
|
s_expressions.rs
|
stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
fn eval(&self) -> Option<i64> {
let op = match self.sexps().nth(0)?.kind() {
SexpKind::Atom(atom) => atom.as_op()?,
_ => return None,
};
let arg1 = self.sexps().nth(1)?.eval()?;
let arg2 = self.sexps().nth(2)?.eval()?;
let res = match op {
Op::Add => arg1 + arg2,
Op::Sub => arg1 - arg2,
Op::Mul => arg1 * arg2,
Op::Div if arg2 == 0 => return None,
Op::Div => arg1 / arg2,
};
Some(res)
}
}
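// Illustrative note (added, not in the original source): for the input "(+ (* 15 2) 62)" used
// in test_parser above, List::eval resolves the operator atom `+`, evaluates the nested list
// to 30 and the trailing atom to 62, and returns Some(92); a division by zero returns None.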
impl Sexp {
fn eval(&self) -> Option<i64> {
match self.kind() {
SexpKind::Atom(atom) => atom.eval(),
SexpKind::List(list) => list.eval(),
}
}
}
/// Let's test the eval!
fn main() {
|
let sexps = "
92
(+ 62 30)
(/ 92 0)
nan
|
random_line_split
|
|
s_expressions.rs
|
use SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser()
|
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else {
None
}
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = S
|
{
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
|
identifier_body
|
s_expressions.rs
|
SyntaxKind::*;
impl From<SyntaxKind> for rowan::SyntaxKind {
fn from(kind: SyntaxKind) -> Self {
Self(kind as u16)
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
enum Lang {}
impl rowan::Language for Lang {
type Kind = SyntaxKind;
fn kind_from_raw(raw: rowan::SyntaxKind) -> Self::Kind {
assert!(raw.0 <= ROOT as u16);
unsafe { std::mem::transmute::<u16, SyntaxKind>(raw.0) }
}
fn kind_to_raw(kind: Self::Kind) -> rowan::SyntaxKind {
kind.into()
}
}
type SyntaxNode = rowan::SyntaxNode<Lang>;
#[allow(unused)]
type SyntaxToken = rowan::SyntaxToken<Lang>;
#[allow(unused)]
type SyntaxElement = rowan::NodeOrToken<SyntaxNode, SyntaxToken>;
/// GreenNode is an immutable tree, which is cheap to change,
/// but doesn't contain offsets and parent pointers.
use rowan::GreenNode;
/// You can construct GreenNodes by hand, but a builder
/// is helpful for top-down parsers: it maintains a stack
/// of currently in-progress nodes
use rowan::GreenNodeBuilder;
/// This is the main type this crate exports.
/// It is also immutable, like a GreenNode,
/// but it contains parent pointers, offsets, and
/// has identity semantics.
/// SyntaxNodes exist in borrowed and owned flavors,
/// which is controlled by the `R` parameter.
struct Parse {
green_node: rowan::GreenNode,
#[allow(unused)]
errors: Vec<String>,
}
impl Parse {
fn syntax(&self) -> Root {
Root::cast(SyntaxNode::new_root(self.green_node.clone())).unwrap()
}
}
/// Now, let's write a parser.
/// Note that `parse` does not return a `Result`:
/// by design, a syntax tree can be built even for
/// completely invalid source code.
fn parse(text: &str) -> Parse {
struct Parser {
/// input tokens, including whitespace,
/// in *reverse* order.
tokens: Vec<(SyntaxKind, SmolStr)>,
/// the in-progress tree.
builder: GreenNodeBuilder<'static>,
/// the list of syntax errors we've accumulated
/// so far.
errors: Vec<String>,
}
enum SexpRes {
Eof,
RParen,
Ok,
}
impl Parser {
fn parse(mut self) -> Parse {
// Make sure that the root node covers all source
self.builder.start_node(ROOT.into());
// Parse a list of S-expressions
loop {
match self.sexp() {
SexpRes::Eof => break,
SexpRes::RParen => {
self.builder.start_node(ERROR.into());
self.errors.push("unmatched `)`".to_string());
self.bump(); // be sure to chug along in case of error
self.builder.finish_node();
}
SexpRes::Ok => (),
}
}
// Don't forget to eat *trailing* whitespace
self.skip_ws();
// Close the root node.
self.builder.finish_node();
// Turn the builder into a complete node.
let green: GreenNode = self.builder.finish();
// Construct a `SyntaxNode` from `GreenNode`,
// using errors as the root data.
Parse { green_node: green, errors: self.errors }
}
fn list(&mut self) {
// Start the list node
self.builder.start_node(LIST.into());
self.bump(); // '('
loop {
match self.sexp() {
SexpRes::Eof => {
self.errors.push("expected `)`".to_string());
break;
}
SexpRes::RParen => {
self.bump();
break;
}
SexpRes::Ok => (),
}
}
// close the list node
self.builder.finish_node();
}
fn sexp(&mut self) -> SexpRes {
// Eat leading whitespace
self.skip_ws();
// Either a list, an atom, a closing paren
// or an eof.
let t = match self.current() {
None => return SexpRes::Eof,
Some(R_PAREN) => return SexpRes::RParen,
Some(t) => t,
};
match t {
L_PAREN => self.list(),
WORD => {
self.builder.start_node(ATOM.into());
self.bump();
self.builder.finish_node();
}
ERROR => self.bump(),
_ => unreachable!(),
}
SexpRes::Ok
}
fn bump(&mut self) {
let (kind, text) = self.tokens.pop().unwrap();
self.builder.token(kind.into(), text);
}
fn current(&self) -> Option<SyntaxKind> {
self.tokens.last().map(|(kind, _)| *kind)
}
fn skip_ws(&mut self) {
while self.current() == Some(WHITESPACE) {
self.bump()
}
}
}
let mut tokens = lex(text);
tokens.reverse();
Parser { tokens, builder: GreenNodeBuilder::new(), errors: Vec::new() }.parse()
}
/// Let's check that the parser works as expected
#[test]
fn test_parser() {
let text = "(+ (* 15 2) 62)";
let node = parse(text);
assert_eq!(
format!("{:?}", node),
"ROOT@[0; 15)", // root node, spanning 15 bytes
);
assert_eq!(node.children().count(), 1);
let list = node.children().next().unwrap();
let children = list.children().map(|child| format!("{:?}", child)).collect::<Vec<_>>();
assert_eq!(
children,
vec![
"L_PAREN@[0; 1)".to_string(),
"ATOM@[1; 2)".to_string(),
"WHITESPACE@[2; 3)".to_string(), // note, explicit whitespace!
"LIST@[3; 11)".to_string(),
"WHITESPACE@[11; 12)".to_string(),
"ATOM@[12; 14)".to_string(),
"R_PAREN@[14; 15)".to_string(),
]
);
}
/// So far, we've been working with a homogeneous untyped tree.
/// It's nice to provide generic tree operations, like traversals,
/// but it's a bad fit for semantic analysis.
/// This crate itself does not provide AST facilities directly,
/// but it is possible to layer AST on top of `SyntaxNode` API.
/// Let's write a function to evaluate S-expression.
///
/// For that, let's define AST nodes.
/// It'll be quite a bunch of repetitive code, so we'll use a macro.
///
/// For a real language, you'd want to generate an AST. I find a
/// combination of `serde`, `ron` and `tera` crates invaluable for that!
macro_rules! ast_node {
($ast:ident, $kind:ident) => {
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct $ast(SyntaxNode);
impl $ast {
#[allow(unused)]
fn cast(node: SyntaxNode) -> Option<Self> {
if node.kind() == $kind {
Some(Self(node))
} else {
None
}
}
}
};
}
ast_node!(Root, ROOT);
ast_node!(Atom, ATOM);
ast_node!(List, LIST);
// Sexp is slightly different, so let's do it by hand.
#[derive(PartialEq, Eq, Hash)]
#[repr(transparent)]
struct Sexp(SyntaxNode);
enum SexpKind {
Atom(Atom),
List(List),
}
impl Sexp {
fn cast(node: SyntaxNode) -> Option<Self> {
if Atom::cast(node.clone()).is_some() || List::cast(node.clone()).is_some() {
Some(Sexp(node))
} else
|
}
fn kind(&self) -> SexpKind {
Atom::cast(self.0.clone())
.map(SexpKind::Atom)
.or_else(|| List::cast(self.0.clone()).map(SexpKind::List))
.unwrap()
}
}
// Let's enhance AST nodes with ancillary functions and
// eval.
impl Root {
fn sexps(&self) -> impl Iterator<Item = Sexp> + '_ {
self.0.children().filter_map(Sexp::cast)
}
}
enum Op {
Add,
Sub,
Div,
Mul,
}
impl Atom {
fn eval(&self) -> Option<i64> {
self.text().parse().ok()
}
fn as_op(&self) -> Option<Op> {
let op = match self.text().as_str() {
"+" => Op::Add,
"-" => Op::Sub,
"*" => Op::Mul,
"/" => Op::Div,
_ => return None,
};
Some(op)
}
fn text(&self) -> &SmolStr {
match &self.0.green().children().next() {
Some(rowan::NodeOrToken::Token(token)) => token.text(),
_ => unreachable!(),
}
}
}
impl List {
fn sexps(&self) -> impl Iterator<Item = S
|
{
None
}
|
conditional_block
|
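The rowan excerpt above defines the AST layer, but this row is cut before the evaluator it promises. A hedged sketch of the same left-fold evaluation idea, written in plain Go with hypothetical names (this is not the rowan API, only an illustration of the algorithm):

package main

import "fmt"

// Sexp is a minimal stand-in for the typed AST layered over the syntax tree:
// either a number (Atom) or an operator applied to child expressions.
type Sexp struct {
	Atom     *int64
	Op       string // "+", "-", "*", "/"
	Children []Sexp
}

// Eval folds the operator over the children, left to right.
func Eval(s Sexp) (int64, error) {
	if s.Atom != nil {
		return *s.Atom, nil
	}
	if len(s.Children) == 0 {
		return 0, fmt.Errorf("empty list")
	}
	acc, err := Eval(s.Children[0])
	if err != nil {
		return 0, err
	}
	for _, c := range s.Children[1:] {
		v, err := Eval(c)
		if err != nil {
			return 0, err
		}
		switch s.Op {
		case "+":
			acc += v
		case "-":
			acc -= v
		case "*":
			acc *= v
		case "/":
			acc /= v
		default:
			return 0, fmt.Errorf("unknown operator %q", s.Op)
		}
	}
	return acc, nil
}

func main() {
	n := func(v int64) Sexp { return Sexp{Atom: &v} }
	// (+ (* 15 2) 62) evaluates to 92, matching the test input above.
	expr := Sexp{Op: "+", Children: []Sexp{{Op: "*", Children: []Sexp{n(15), n(2)}}, n(62)}}
	v, _ := Eval(expr)
	fmt.Println(v) // 92
}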
play.js
|
{hanlderName:'list_play',src:'../../assets/images/list_play.png',className:'button-bar-list_play',hidden:false},
{hanlderName:'cycle_list_play',src:'../../assets/images/cycle_list_play.png',className:'button-bar-cycle_list_play',hidden:true},
{hanlderName:'cycle_single_play',src:'../../assets/images/cycle_single_play.png',className:'button-bar-cycle_single_play',hidden:true},
{hanlderName:'random_play',src:'../../assets/images/random_play.png',className:'button-bar-random_play',hidden:true},
],
// Start timestamp for the throttle function
timeStart:0,
// Progress bar position (length of the red bar)
progressBarWidth:0,
// Progress bar node info
progressBarPosition:[],
nowClientX:0,
newClientX:0
},
/**
* Lifecycle hook -- triggered when the page loads
*/
onLoad: function (options) {
this.setData({
songInfo:app.globalData.songInfo,
musicList:app.globalData.musicList,
})
app.globalData.backgroundAudioManager.pause()
this.changePlayType('list_play')
},
/**
* Lifecycle hook -- triggered when the first render completes
*/
onReady: function () {
let that=this
const query = wx.createSelectorQuery()
query.select('#progressBar').boundingClientRect()
query.selectViewport().scrollOffset()
query.exec(function(res){
that.setData({
progressBarPosition:res
})
})
},
/**
* Lifecycle hook -- triggered when the page is shown
*/
onShow: function () {
if (app.globalData.id) {
let ids=this.data.musicList.map(v=>v.id)
// There is an ID and it is a new one
if (ids.indexOf(app.globalData.id)==-1) {
this.createPlayerFn(app.globalData.id)
}
// The ID is already in the list,
if (ids.indexOf(app.globalData.id)!=-1) {
// and it is not the current song, so switch to it
if (app.globalData.id!=this.data.songInfo.id) {
this.createBackgroundAudioManager(this.data.musicList[ids.indexOf(app.globalData.id)])
}else{ // It is the current song; do nothing
return
}
}
}else{// Just entered without selecting a song, e.g. tapped in from the bottom bar
if (app.globalData.backgroundAudioManager.src) {
return
}
if (this.data.musicList.length>0) {
// Create the player
this.createBackgroundAudioManager(this.data.musicList[0],false)
}else{
wx.showToast({
title: '播放列表为空,请添加歌曲',
icon: 'none',
image: '',
duration: 1500,
mask: false,
success: (result)=>{
// wx.switchTab({
// url: "../discover/discover",
// })
},
fail: ()=>{},
complete: ()=>{}
});
}
}
},
/**
* Lifecycle hook -- triggered when the page is hidden
*/
onHide: function () {
},
/**
* Lifecycle hook -- triggered when the page unloads
*/
onUnload: function () {
},
/**
* Page event handler -- triggered on pull-down refresh
*/
onPullDownRefresh: function () {
},
/**
* Handler for the page being scrolled to the bottom
*/
onReachBottom: function () {
},
/**
* Triggered when the user taps the share button in the top-right corner
*/
onShareAppMessage: function () {
},
// Request the playback URL and song details
createPlayerFn:async function (id) {
let resPlayURL=await api.getPlayURL({id:id}),// first get the playback URL
resSongInfo=await api.getPlayDtail({ids:id}) // then get the song details (for the cover art)
if (resPlayURL.code==200 && resSongInfo.code==200) {
// Playback info for the current song
let singer=''
// There may be more than one artist
resSongInfo.songs[0].ar.map((item,index)=>{
singer+=index==resSongInfo.songs[0].ar.length-1?item.name:item.name+'/'
})
let songInfo={
id:id,
title:resSongInfo.songs[0].name,
singer:singer+' - '+resSongInfo.songs[0].al.name,// artist/artist/artist - album
src:resPlayURL.data[0].url,
picUrl:resSongInfo.songs[0].al.picUrl,
songTime_ms:resSongInfo.songs[0].dt,// in milliseconds
songTime_s:resSongInfo.songs[0].dt/1000,// in seconds
}
// Create the player
this.createBackgroundAudioManager(songInfo)
app.globalData.musicList.push(songInfo)
this.setData({
songInfo:songInfo,
musicList:app.globalData.musicList,
isPlay:true
})
wx.setStorageSync("musicList",this.data.musicList)
}else{
}
},
// Playback control
playControlFn:function (event) {
let hanlderName=event.currentTarget.dataset.hanldername
//---------------------- First icon: adjust the play mode --------------------
if (hanlderName=='list_play' || hanlderName=='cycle_list_play' || hanlderName=='cycle_single_play' || hanlderName=='random_play') {
let newhanlderName=''
// Tapped list play (advance to the next mode: list loop)
if (hanlderName=='list_play') newhanlderName='cycle_list_play'
// Tapped list loop (advance to the next mode: single loop)
if (hanlderName=='cycle_list_play') newhanlderName='cycle_single_play'
// Tapped single loop (advance to the next mode: random play)
if (hanlderName=='cycle_single_play') newhanlderName='random_play'
// Tapped random play (advance to the next mode: list play)
if (hanlderName=='random_play') newhanlderName='list_play'
this.changePlayType(newhanlderName)
return
}
// -------------------------------------------
// Previous track, next track
if (hanlderName=='prev' || hanlderName=='next') {
this.switchMusic(hanlderName)
}
// Play
if (hanlderName=='play') {
if (app.globalData.backgroundAudioManager.src) {
app.globalData.backgroundAudioManager.play()
}else{
this.createBackgroundAudioManager(this.data.songInfo)
}
this.setData({
isPlay:true
})
return
}
// Pause
if (hanlderName=='pause') {
app.globalData.backgroundAudioManager.pause()
this.setData({
isPlay:false
})
return
}
// Playlist
if (hanlderName=='list') {
wx.navigateTo({
url: '../musicList/musicList',
})
return
}
},
// Logic for switching to the previous/next track
switchMusic:function (type) {
let musicList=this.data.musicList,
nowIndex=this.getNowPlayIndex(),
|
newIndex=null
console.log(musicList);
console.log(nowIndex);
console.log(type);
console.log(playType);
if (musicList.length<=1) {
return
}
// Random play (prev and next both just pick a random track)
if (playType=='random_play') {
// Get a random number below the playlist length that differs from the current index
let getRandomNum=function getRandomNumFn(index) {
newIndex=Math.floor(Math.random()*musicList.length)
if (newIndex==index) {
getRandomNumFn(index)
} else {
return newIndex
}
}
getRandomNum(nowIndex)
}
// The other three modes use normal previous/next behaviour
if (playType=='list_play' || playType=='cycle_list_play' || playType=='cycle_single_play') {
// Previous track
if (type=="prev") {
// If this is the first track, wrap around to the last one
if (nowIndex==0) {
newIndex=musicList.length-1
}else{ // normal previous track
newIndex=nowIndex-1
}
}
// Next track
if (type=="next") {
// If this is the last track, wrap around to the first one
if (nowIndex==musicList.length-1) {
newIndex=0
}else{ // normal next track
newIndex=nowIndex+1
}
}
}
console.log(musicList[nowIndex].title);
console.log(musicList[newIndex].title);
this.createBackgroundAudioManager(musicList[newIndex])
},
// Switch the play mode
changePlayType:function (playType) {
let playTypeList=this.data.playTypeList
// Show the new icon and set the play mode
this.setData({
playTypeList:playTypeList.map(v=>{
return {
...v,
hidden:v.hanlderName==playType?false:true
}
}),
playType:playType
})
// Change what happens after a track finishes playing naturally
app.globalData.backgroundAudioManager.onEnded(()=>{
console.log('in');
let nowIndex=this.getNowPlayIndex(),
musicList=this.data.musicList
// List play
|
playType=this.data.playType,
|
random_line_split
|
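In switchMusic above, random play picks the next index with a recursive helper whose return value is discarded (it relies on assigning the outer newIndex) and that may re-roll repeatedly until it lands on a different index. A hedged sketch of the same "pick an index other than the current one" idea, done with a single draw (plain Go, hypothetical names, not part of the mini-program code):

package main

import (
	"fmt"
	"math/rand"
)

// randomOtherIndex returns a random index in [0, length) that differs from current.
// It assumes length >= 2, matching the early return in switchMusic when the playlist
// has at most one song.
func randomOtherIndex(current, length int) int {
	// Draw from length-1 slots and skip over the current index,
	// so a single draw always suffices (no re-rolling).
	i := rand.Intn(length - 1)
	if i >= current {
		i++
	}
	return i
}

func main() {
	fmt.Println(randomOtherIndex(2, 5)) // some index in {0, 1, 3, 4}
}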
play.js
|
hanlderName:'list_play',src:'../../assets/images/list_play.png',className:'button-bar-list_play',hidden:false},
{hanlderName:'cycle_list_play',src:'../../assets/images/cycle_list_play.png',className:'button-bar-cycle_list_play',hidden:true},
{hanlderName:'cycle_single_play',src:'../../assets/images/cycle_single_play.png',className:'button-bar-cycle_single_play',hidden:true},
{hanlderName:'random_play',src:'../../assets/images/random_play.png',className:'button-bar-random_play',hidden:true},
],
// Start timestamp for the throttle function
timeStart:0,
// Progress bar position (length of the red bar)
progressBarWidth:0,
// Progress bar node info
progressBarPosition:[],
nowClientX:0,
newClientX:0
},
/**
* Lifecycle hook -- triggered when the page loads
*/
onLoad: function (options) {
this.setData({
songInfo:app.globalData.songInfo,
musicList:app.globalData.musicList,
})
app.globalData.backgroundAudioManager.pause()
this.changePlayType('list_play')
},
/**
* Lifecycle hook -- triggered when the first render completes
*/
onReady: function () {
let that=this
const query = wx.createSelectorQuery()
query.select('#progressBar').boundingClientRect()
query.selectViewport().scrollOffset()
query.exec(function(res){
that.setData({
progressBarPosition:res
})
})
},
/**
* Lifecycle hook -- triggered when the page is shown
*/
onShow: function () {
if (app.globalData.id) {
let ids=this.data.musicList.map(v=>v.id)
// There is an ID and it is a new one
if (ids.indexOf(app.globalData.id)==-1) {
this.createPlayerFn(app.globalData.id)
}
// The ID is already in the list,
if (ids.indexOf(app.globalData.id)!=-1) {
// and it is not the current song, so switch to it
if (app.globalData.id!=this.data.songInfo.id) {
this.createBackgroundAudioManager(this.data.musicList[ids.indexOf(app.globalData.id)])
}else{ // It is the current song; do nothing
return
}
}
}else{// Just entered without selecting a song, e.g. tapped in from the bottom bar
if (app.globalData.backgroundAudioManager.src) {
return
}
if (this.data.musicList.length>0) {
// Create the player
this.createBackgroundAudioManager(this.data.musicList[0],false)
}else{
wx.showToast({
title: '播放列表为空,请添加歌曲',
icon: 'none',
image: '',
duration: 1500,
mask: false,
success: (result)=>{
// wx.switchTab({
// url: "../discover/discover",
// })
},
fail: ()=>{},
complete: ()=>{}
});
}
}
},
/**
* Lifecycle hook -- triggered when the page is hidden
*/
onHide: function () {
},
/**
* Lifecycle hook -- triggered when the page unloads
*/
onUnload: function () {
},
/**
* Page event handler -- triggered on pull-down refresh
*/
onPullDownRefresh: function () {
},
/**
* Handler for the page being scrolled to the bottom
*/
onReachBottom: function () {
},
/**
* Triggered when the user taps the share button in the top-right corner
*/
onShareAppMessage: function () {
},
// Request the playback URL and song details
createPlayerFn:async function (id) {
let resPlayURL=await api.getPlayURL({id:id}),// first get the playback URL
resSongInfo=await api.getPlayDtail({ids:id}) // then get the song details (for the cover art)
if (resPlayURL.code==200 && resSongInfo.code==200) {
// Playback info for the current song
let singer=''
// There may be more than one artist
resSongInfo.songs[0].ar.map((item,index)=>{
singer+=index==resSongInfo.songs[0].ar.length-1?item.name:item.name+'/'
})
let songInfo={
id:id,
title:resSongInfo.songs[0].name,
singer:singer+' - '+resSongInfo.songs[0].al.name,// artist/artist/artist - album
src:resPlayURL.data[0].url,
picUrl:resSongInfo.songs[0].al.picUrl,
songTime_ms:resSongInfo.songs[0].dt,// in milliseconds
songTime_s:resSongInfo.songs[0].dt/1000,// in seconds
}
// Create the player
this.createBackgroundAudioManager(songInfo)
app.globalData.musicList.push(songInfo)
this.setData({
songInfo:songInfo,
musicList:app.globalData.musicList,
isPlay:true
})
wx.setStorageSync("musicList",this.data.musicList)
}else{
}
},
// Playback control
playControlFn:function (event) {
let hanlderName=event.currentTarget.dataset.hanldername
//---------------------- First icon: adjust the play mode --------------------
if (hanlderName=='list_play' || hanlderName=='cycle_list_play' || hanlderName=='cycle_single_play' || hanlderName=='random_play') {
let newhanlderName=''
// Tapped list play (advance to the next mode: list loop)
if (hanlderName=='list_play') newhanlderName='cycle_list_play'
// Tapped list loop (advance to the next mode: single loop)
if (hanlderName=='cycle_list_play') newhanlderName='cycle_single_play'
// Tapped single loop (advance to the next mode: random play)
if (hanlderName=='cycle_single_play') newhanlderName='random_play'
// Tapped random play (advance to the next mode: list play)
if (hanlderName=='random_play') newhanlderName='list_play'
this.changePlayType(newhanlderName)
return
}
// -------------------------------------------
// Previous track, next track
if (hanlderName=='prev' || hanlderName=='next') {
this.switchMusic(hanlderName)
}
// Play
if (hanlderName=='play') {
if (app.globalData.backgroundAudioManager.src) {
app.globalData.backgroundAudioManager.play()
}else{
this.createBackgroundAudioManager(this.data.songInfo)
}
this.setData({
isPlay:true
})
return
}
// Pause
if (hanlderName=='pause') {
app.globalData.backgroundAudioManager.pause()
this.setData({
isPlay:false
})
return
}
// Playlist
if (hanlderName=='list') {
wx.navigateTo({
url: '../musicList/musicList',
})
return
}
},
// Logic for switching to the previous/next track
switchMusic:function (type) {
let musicList=this.data.musicList,
nowIndex=this.getNowPlayIndex(),
playType=this.data.playType,
newIndex=null
console.log(musicList);
console.log(nowIndex);
console.log(type);
console.log(playType);
if (musicList.length<=1) {
return
}
// Random play (prev and next both just pick a random track)
if (playType=='random_play') {
// Get a random number below the playlist length that differs from the current index
let getRandomNum=function getRandomNumFn(index) {
newIndex=Math.floor(Math.random()*musicList.length)
if (newIndex==index) {
getRandomNumFn(index)
} else {
return newIndex
}
}
getRandomNum(nowIndex)
}
// The other three modes use normal previous/next behaviour
if (playType=='list_play' || playType=='cycle_list_play' || playType=='cycle_single_play') {
// Previous track
if (type=="prev") {
// If this is the first track, wrap around to the last one
if (nowIndex==0) {
newIndex=musicList.length-1
}else{ // normal previous track
newIndex=nowIndex-1
}
}
// Next track
if (type=="next") {
// If this is the last track, wrap around to the first one
if (nowIndex==musicList.length-1) {
newIndex=0
}else{ // normal next track
newIndex=nowIndex+1
}
}
}
console.log(musicList[nowIndex].title);
console.log(musicList[newIndex].title);
this.createBackgroundAudioManager(musicList[newIndex])
},
// Switch the play mode
changePlayType:function (playType) {
let playTypeList=this.data.playTypeList
// Show the new icon and set the play mode
this.setD
|
=>{
return {
...v,
hidden:v.hanlderName==playType?false:true
}
}),
playType:playType
})
// Change what happens after a track finishes playing naturally
app.globalData.backgroundAudioManager.onEnded(()=>{
console.log('in');
let nowIndex=this.getNowPlayIndex(),
musicList=this.data.musicList
// List play
|
ata({
playTypeList:playTypeList.map(v
|
conditional_block
|
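playControlFn above advances the play mode through a fixed cycle (list play, list loop, single loop, random, back to list play) via chained ifs. A hedged sketch of the same cycle expressed as a lookup table (plain Go, hypothetical names, only an illustration of the design choice):

package main

import "fmt"

// nextPlayType maps each play mode to the one that follows it,
// mirroring the if-chain in playControlFn.
var nextPlayType = map[string]string{
	"list_play":         "cycle_list_play",
	"cycle_list_play":   "cycle_single_play",
	"cycle_single_play": "random_play",
	"random_play":       "list_play",
}

func advancePlayType(current string) string {
	if next, ok := nextPlayType[current]; ok {
		return next
	}
	return "list_play" // fall back to the default mode
}

func main() {
	fmt.Println(advancePlayType("cycle_single_play")) // random_play
}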
controller.go
|
,
}
type ControllerConfiguration struct {
ReconcilerSyncLoopPeriod metav1.Duration
}
// DefaultCaffe2JobControllerConfiguration is the suggested caffe2-operator configuration for production.
var DefaultCaffe2JobControllerConfiguration ControllerConfiguration = ControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 15 * time.Second},
}
type Controller struct {
config ControllerConfiguration
// podControl is used to add or delete pods.
podControl PodControlInterface
// serviceControl is used to add or delete services.
serviceControl ServiceControlInterface
// kubeClient is a standard kubernetes clientset.
kubeClient kubernetes.Interface
// caffe2JobClientSet is a clientset for CRD Caffe2Job.
caffe2JobClient jobclient.Interface
// caffe2JobLister can list/get caffe2jobs from the shared informer's store.
caffe2JobLister listers.Caffe2JobLister
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// serviceLister can list/get services from the shared informer's store.
serviceLister corelisters.ServiceLister
podInformer clientv1.PodInformer
caffe2JobInformer v1alpha1.Caffe2JobInformer
// returns true if the caffe2job store has been synced at least once.
caffe2JobSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
podListerSynced cache.InformerSynced
// serviceListerSynced returns true if the service store has been synced at least once.
serviceListerSynced cache.InformerSynced
// WorkQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// To allow injection of syncCaffe2Job for testing.
syncHandler func(jobKey string) (bool, error)
// To allow injection of updateStatus for testing.
updateStatusHandler func(job *api.Caffe2Job) error
}
func New(kubeClient kubernetes.Interface, caffe2JobClient jobclient.Interface) (*Controller, error) {
kubeflowscheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podControl := RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
serviceControl := RealServiceControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
controller := &Controller{
podControl: podControl,
serviceControl: serviceControl,
kubeClient: kubeClient,
caffe2JobClient: caffe2JobClient,
workQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Caffe2jobs"),
recorder: recorder,
}
caffe2JobInformerFactory := informers.NewSharedInformerFactory(caffe2JobClient, time.Second*30)
podInformerFactory := k8sinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
controller.caffe2JobInformer = caffe2JobInformerFactory.Kubeflow().V1alpha1().Caffe2Jobs()
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
controller.caffe2JobInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *api.Caffe2Job:
glog.V(4).Infof("filter caffe2job name: %v", t.Name)
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addCaffe2Job,
UpdateFunc: controller.updateCaffe2Job,
DeleteFunc: controller.enqueueController,
},
})
controller.caffe2JobLister = controller.caffe2JobInformer.Lister()
controller.caffe2JobSynced = controller.caffe2JobInformer.Informer().HasSynced
// create informer for pod information
controller.podInformer = podInformerFactory.Core().V1().Pods()
controller.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch obj.(type) {
case *v1.Pod:
pod := obj.(*v1.Pod)
if _, ok := pod.Labels["caffe2_job_key"]; !ok {
return false
}
return pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addPod,
UpdateFunc: controller.updatePod,
DeleteFunc: controller.deletePod,
},
})
controller.podLister = controller.podInformer.Lister()
controller.podListerSynced = controller.podInformer.Informer().HasSynced
controller.syncHandler = controller.syncCaffe2Job
controller.updateStatusHandler = controller.updateCaffe2JobStatus
return controller, nil
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workQueue.ShutDown()
go c.podInformer.Informer().Run(stopCh)
go c.caffe2JobInformer.Informer().Run(stopCh)
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Caffe2Job controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
glog.V(4).Info("Sync caffe2jobs...")
if ok := cache.WaitForCacheSync(stopCh, c.caffe2JobSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.V(4).Info("Sync pods...")
if ok := cache.WaitForCacheSync(stopCh, c.podListerSynced); !ok {
return fmt.Errorf("failed to wait for pod caches to sync")
}
glog.Infof("Starting %v workers", threadiness)
// Launch workers to process Caffe2Job resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget
|
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given key. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true, indicating that it's OK to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name)
|
{
c.workQueue.Forget(key)
}
|
conditional_block
|
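processNextWorkItem above follows the usual client-go contract: forget the key on success, requeue it with rate limiting on error. A minimal sketch of that contract in isolation, assuming the same workqueue package this file already imports (the process and sync names below are hypothetical):

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// process drains one item and applies the forget-or-requeue contract used by
// processNextWorkItem above. sync stands in for the controller's syncHandler.
func process(queue workqueue.RateLimitingInterface, sync func(key string) (bool, error)) bool {
	key, quit := queue.Get()
	if quit {
		return false
	}
	defer queue.Done(key)

	forget, err := sync(key.(string))
	if err == nil {
		if forget {
			queue.Forget(key) // stop tracking retries for this key
		}
		return true
	}
	// On error, let the rate limiter decide when the key comes back.
	queue.AddRateLimited(key)
	return true
}

func main() {
	q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "demo")
	q.Add("ns/name")
	ok := process(q, func(key string) (bool, error) { return true, nil })
	fmt.Println(ok, q.Len()) // true 0
}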
controller.go
|
processNextWorkItem() bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget {
c.workQueue.Forget(key)
}
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given key. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true, indicating that it's OK to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name) == 0 {
return false, fmt.Errorf("invalid job key %q: either namespace or name is missing", key)
}
job, err := c.caffe2JobLister.Caffe2Jobs(ns).Get(name)
if err != nil {
if apierrors.IsNotFound(err) {
glog.V(4).Infof("Job has been deleted: %v", key)
return true, nil
}
return false, err
}
glog.Infof("Caffe2Jobs: %#v", job)
var reconcileCaffe2JobsErr error
if job.DeletionTimestamp == nil {
reconcileCaffe2JobsErr = c.reconcileCaffe2Jobs(job)
}
if reconcileCaffe2JobsErr != nil {
return false, reconcileCaffe2JobsErr
}
return true, err
}
// obj could be a *batch.Job, or a DeletionFinalStateUnknown marker item.
func (c *Controller) enqueueController(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
return
}
c.workQueue.AddRateLimited(key)
}
// reconcileCaffe2Jobs checks and updates replicas for each given Caffe2ReplicaSpec.
// It will requeue the caffe2job in case of an error while creating/deleting pods/services.
func (c *Controller) reconcileCaffe2Jobs(job *api.Caffe2Job) error {
glog.Infof("Reconcile Caffe2Jobs %s", job.Name)
pods, err := c.getPodsForCaffe2Job(job)
if err != nil {
glog.Infof("getPodsForCaffe2Job error %v", err)
return err
}
glog.V(4).Infof("Pods is %#v", pods)
/* TODO services
services, err := c.getServicesForCaffe2Job(job)
if err != nil {
glog.Infof("getServicesForCaffe2Job error %v", err)
return err
}
*/
// Diff current active pods/services with replicas.
spec := job.Spec.ReplicaSpecs
err = c.reconcilePods(job, pods, spec)
if err != nil {
glog.Infof("reconcilePods error %v", err)
return err
}
/*
err = c.reconcileServices(job, services, rtype, spec)
if err != nil {
glog.Infof("reconcileServices error %v", err)
return err
}
*/
// TODO: Add check here, no need to update the caffe2job if the status hasn't changed since last time.
return c.updateStatusHandler(job)
}
func genLabels(id, jobKey string) map[string]string {
return map[string]string{
"group_name": api.GroupName,
"caffe2_job_key": strings.Replace(jobKey, "/", "-", -1),
"runtime_id": id,
}
}
// When a pod is added, set the defaults and enqueue the current caffe2job.
func (c *Controller) addCaffe2Job(obj interface{}) {
job := obj.(*api.Caffe2Job)
msg := fmt.Sprintf("Caffe2Job %s is created.", job.Name)
glog.Info(msg)
scheme.Scheme.Default(job)
// Leave a created condition.
err := c.updateCaffe2JobConditions(job, api.Caffe2JobCreated, caffe2JobCreatedReason, msg)
if err != nil {
glog.Errorf("Append caffe2job condition error: %v", err)
return
}
c.enqueueController(obj)
}
// When a pod is updated, enqueue the current caffe2job.
func (c *Controller) updateCaffe2Job(old, cur interface{}) {
oldCaffe2Job := old.(*api.Caffe2Job)
glog.Infof("Updating caffe2job: %s", oldCaffe2Job.Name)
c.enqueueController(cur)
}
func (c *Controller) updateCaffe2JobStatus(job *api.Caffe2Job) error {
_, err := c.caffe2JobClient.KubeflowV1alpha1().Caffe2Jobs(job.Namespace).Update(job)
return err
}
func (c *Controller) updateCaffe2JobConditions(job *api.Caffe2Job, conditionType api.Caffe2JobConditionType, reason, message string) error {
condition := newCondition(conditionType, reason, message)
setCondition(&job.Status, condition)
return nil
}
// resolveControllerRef returns the tfjob referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching tfjob
// of the correct Kind.
func (c *Controller) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *api.Caffe2Job {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
return nil
}
job, err := c.caffe2JobLister.Caffe2Jobs(namespace).Get(controllerRef.Name)
if err != nil {
return nil
}
if job.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return job
}
func genOwnerReference(job *api.Caffe2Job) *metav1.OwnerReference {
boolPtr := func(b bool) *bool { return &b }
controllerRef := &metav1.OwnerReference{
APIVersion: groupVersionKind.GroupVersion().String(),
Kind: groupVersionKind.Kind,
Name: job.Name,
UID: job.UID,
BlockOwnerDeletion: boolPtr(true),
Controller: boolPtr(true),
}
return controllerRef
}
// newCondition creates a new caffe2job condition.
func newCondition(conditionType api.Caffe2JobConditionType, reason, message string) api.Caffe2JobCondition {
return api.Caffe2JobCondition{
Type: conditionType,
Status: v1.ConditionTrue,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Reason: reason,
Message: message,
}
}
// getCondition returns the condition with the provided type.
func getCondition(status api.Caffe2JobStatus, condType api.Caffe2JobConditionType) *api.Caffe2JobCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
return &c
}
}
return nil
}
// setCondition updates the caffe2job to include the provided condition.
// If the condition that we are about to add already exists
// and has the same status and reason then we are not going to update.
func setCondition(status *api.Caffe2JobStatus, condition api.Caffe2JobCondition) {
currentCond := getCondition(*status, condition.Type)
// Do nothing if condition doesn't change
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
}
// Do not update lastTransitionTime if the status of the condition doesn't change.
if currentCond != nil && currentCond.Status == condition.Status {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
// Append the updated condition to the list of conditions.
newConditions := filterOutCondition(status.Conditions, condition.Type)
status.Conditions = append(newConditions, condition)
}
// removeCondition removes the caffe2job condition with the provided type.
func removeCondition(status *api.Caffe2JobStatus, condType api.Caffe2JobConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}
// filterOutCondition returns a new slice of caffe2job conditions without conditions with the provided type.
func filterOutCondition(conditions []api.Caffe2JobCondition, condType api.Caffe2JobConditionType) []api.Caffe2JobCondition
|
{
var newConditions []api.Caffe2JobCondition
for _, c := range conditions {
if c.Type == condType {
continue
}
newConditions = append(newConditions, c)
}
return newConditions
}
|
identifier_body
|
|
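setCondition above deduplicates conditions by type and carries over LastTransitionTime when the status does not change. A hedged, self-contained sketch of that behaviour using a simplified stand-in type (plain Go, not the real api package):

package main

import (
	"fmt"
	"time"
)

// Condition is a simplified stand-in for api.Caffe2JobCondition.
type Condition struct {
	Type               string
	Status             string
	Reason             string
	LastTransitionTime time.Time
}

// setCondition mirrors the logic above: if nothing changed, keep the slice as-is;
// otherwise drop the old condition of the same type, preserve its transition time
// when the status is unchanged, and append the new condition.
func setCondition(conditions []Condition, c Condition) []Condition {
	for _, old := range conditions {
		if old.Type != c.Type {
			continue
		}
		if old.Status == c.Status && old.Reason == c.Reason {
			return conditions // nothing changed
		}
		if old.Status == c.Status {
			c.LastTransitionTime = old.LastTransitionTime
		}
	}
	var filtered []Condition
	for _, old := range conditions {
		if old.Type != c.Type {
			filtered = append(filtered, old)
		}
	}
	return append(filtered, c)
}

func main() {
	created := Condition{Type: "Created", Status: "True", Reason: "JobCreated", LastTransitionTime: time.Now()}
	conds := setCondition(nil, created)
	// Same type and status with a new reason: the transition time is carried over.
	conds = setCondition(conds, Condition{Type: "Created", Status: "True", Reason: "Requeued"})
	fmt.Println(len(conds), conds[0].Reason) // 1 Requeued
}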
controller.go
|
,
}
type ControllerConfiguration struct {
ReconcilerSyncLoopPeriod metav1.Duration
}
// DefaultCaffe2JobControllerConfiguration is the suggested caffe2-operator configuration for production.
var DefaultCaffe2JobControllerConfiguration ControllerConfiguration = ControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 15 * time.Second},
}
type Controller struct {
config ControllerConfiguration
// podControl is used to add or delete pods.
podControl PodControlInterface
// serviceControl is used to add or delete services.
serviceControl ServiceControlInterface
// kubeClient is a standard kubernetes clientset.
kubeClient kubernetes.Interface
|
// caffe2JobLister can list/get caffe2jobs from the shared informer's store.
caffe2JobLister listers.Caffe2JobLister
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// serviceLister can list/get services from the shared informer's store.
serviceLister corelisters.ServiceLister
podInformer clientv1.PodInformer
caffe2JobInformer v1alpha1.Caffe2JobInformer
// returns true if the caffe2job store has been synced at least once.
caffe2JobSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
podListerSynced cache.InformerSynced
// serviceListerSynced returns true if the service store has been synced at least once.
serviceListerSynced cache.InformerSynced
// WorkQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// To allow injection of syncCaffe2Job for testing.
syncHandler func(jobKey string) (bool, error)
// To allow injection of updateStatus for testing.
updateStatusHandler func(job *api.Caffe2Job) error
}
func New(kubeClient kubernetes.Interface, caffe2JobClient jobclient.Interface) (*Controller, error) {
kubeflowscheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podControl := RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
serviceControl := RealServiceControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
controller := &Controller{
podControl: podControl,
serviceControl: serviceControl,
kubeClient: kubeClient,
caffe2JobClient: caffe2JobClient,
workQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Caffe2jobs"),
recorder: recorder,
}
caffe2JobInformerFactory := informers.NewSharedInformerFactory(caffe2JobClient, time.Second*30)
podInformerFactory := k8sinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
controller.caffe2JobInformer = caffe2JobInformerFactory.Kubeflow().V1alpha1().Caffe2Jobs()
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
controller.caffe2JobInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *api.Caffe2Job:
glog.V(4).Infof("filter caffe2job name: %v", t.Name)
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addCaffe2Job,
UpdateFunc: controller.updateCaffe2Job,
DeleteFunc: controller.enqueueController,
},
})
controller.caffe2JobLister = controller.caffe2JobInformer.Lister()
controller.caffe2JobSynced = controller.caffe2JobInformer.Informer().HasSynced
// create informer for pod information
controller.podInformer = podInformerFactory.Core().V1().Pods()
controller.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch obj.(type) {
case *v1.Pod:
pod := obj.(*v1.Pod)
if _, ok := pod.Labels["caffe2_job_key"]; !ok {
return false
}
return pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addPod,
UpdateFunc: controller.updatePod,
DeleteFunc: controller.deletePod,
},
})
controller.podLister = controller.podInformer.Lister()
controller.podListerSynced = controller.podInformer.Informer().HasSynced
controller.syncHandler = controller.syncCaffe2Job
controller.updateStatusHandler = controller.updateCaffe2JobStatus
return controller, nil
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workQueue.ShutDown()
go c.podInformer.Informer().Run(stopCh)
go c.caffe2JobInformer.Informer().Run(stopCh)
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Caffe2Job controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
glog.V(4).Info("Sync caffe2jobs...")
if ok := cache.WaitForCacheSync(stopCh, c.caffe2JobSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.V(4).Info("Sync pods...")
if ok := cache.WaitForCacheSync(stopCh, c.podListerSynced); !ok {
return fmt.Errorf("failed to wait for pod caches to sync")
}
glog.Infof("Starting %v workers", threadiness)
// Launch workers to process Caffe2Job resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget {
c.workQueue.Forget(key)
}
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given key. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true, indicating that it's OK to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name) == 0
|
// caffe2JobClientSet is a clientset for CRD Caffe2Job.
caffe2JobClient jobclient.Interface
|
random_line_split
|
controller.go
|
,
}
type ControllerConfiguration struct {
ReconcilerSyncLoopPeriod metav1.Duration
}
// DefaultCaffe2JobControllerConfiguration is the suggested caffe2-operator configuration for production.
var DefaultCaffe2JobControllerConfiguration ControllerConfiguration = ControllerConfiguration{
ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 15 * time.Second},
}
type Controller struct {
config ControllerConfiguration
// podControl is used to add or delete pods.
podControl PodControlInterface
// serviceControl is used to add or delete services.
serviceControl ServiceControlInterface
// kubeClient is a standard kubernetes clientset.
kubeClient kubernetes.Interface
// caffe2JobClientSet is a clientset for CRD Caffe2Job.
caffe2JobClient jobclient.Interface
// caffe2JobLister can list/get caffe2jobs from the shared informer's store.
caffe2JobLister listers.Caffe2JobLister
// podLister can list/get pods from the shared informer's store.
podLister corelisters.PodLister
// serviceLister can list/get services from the shared informer's store.
serviceLister corelisters.ServiceLister
podInformer clientv1.PodInformer
caffe2JobInformer v1alpha1.Caffe2JobInformer
// returns true if the caffe2job store has been synced at least once.
caffe2JobSynced cache.InformerSynced
// podListerSynced returns true if the pod store has been synced at least once.
podListerSynced cache.InformerSynced
// serviceListerSynced returns true if the service store has been synced at least once.
serviceListerSynced cache.InformerSynced
// WorkQueue is a rate limited work queue. This is used to queue work to be
// processed instead of performing it as soon as a change happens. This
// means we can ensure we only process a fixed amount of resources at a
// time, and makes it easy to ensure we are never processing the same item
// simultaneously in two different workers.
workQueue workqueue.RateLimitingInterface
// recorder is an event recorder for recording Event resources to the
// Kubernetes API.
recorder record.EventRecorder
// To allow injection of syncCaffe2Job for testing.
syncHandler func(jobKey string) (bool, error)
// To allow injection of updateStatus for testing.
updateStatusHandler func(job *api.Caffe2Job) error
}
func New(kubeClient kubernetes.Interface, caffe2JobClient jobclient.Interface) (*Controller, error) {
kubeflowscheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
podControl := RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
serviceControl := RealServiceControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "caffe2job-controller"}),
}
controller := &Controller{
podControl: podControl,
serviceControl: serviceControl,
kubeClient: kubeClient,
caffe2JobClient: caffe2JobClient,
workQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Caffe2jobs"),
recorder: recorder,
}
caffe2JobInformerFactory := informers.NewSharedInformerFactory(caffe2JobClient, time.Second*30)
podInformerFactory := k8sinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
controller.caffe2JobInformer = caffe2JobInformerFactory.Kubeflow().V1alpha1().Caffe2Jobs()
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
controller.caffe2JobInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch t := obj.(type) {
case *api.Caffe2Job:
glog.V(4).Infof("filter caffe2job name: %v", t.Name)
return true
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addCaffe2Job,
UpdateFunc: controller.updateCaffe2Job,
DeleteFunc: controller.enqueueController,
},
})
controller.caffe2JobLister = controller.caffe2JobInformer.Lister()
controller.caffe2JobSynced = controller.caffe2JobInformer.Informer().HasSynced
// create informer for pod information
controller.podInformer = podInformerFactory.Core().V1().Pods()
controller.podInformer.Informer().AddEventHandler(
cache.FilteringResourceEventHandler{
FilterFunc: func(obj interface{}) bool {
switch obj.(type) {
case *v1.Pod:
pod := obj.(*v1.Pod)
if _, ok := pod.Labels["caffe2_job_key"]; !ok {
return false
}
return pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed
default:
return false
}
},
Handler: cache.ResourceEventHandlerFuncs{
AddFunc: controller.addPod,
UpdateFunc: controller.updatePod,
DeleteFunc: controller.deletePod,
},
})
controller.podLister = controller.podInformer.Lister()
controller.podListerSynced = controller.podInformer.Informer().HasSynced
controller.syncHandler = controller.syncCaffe2Job
controller.updateStatusHandler = controller.updateCaffe2JobStatus
return controller, nil
}
// Run will set up the event handlers for types we are interested in, as well
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workQueue.ShutDown()
go c.podInformer.Informer().Run(stopCh)
go c.caffe2JobInformer.Informer().Run(stopCh)
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Caffe2Job controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
glog.V(4).Info("Sync caffe2jobs...")
if ok := cache.WaitForCacheSync(stopCh, c.caffe2JobSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.V(4).Info("Sync pods...")
if ok := cache.WaitForCacheSync(stopCh, c.podListerSynced); !ok {
return fmt.Errorf("failed to wait for pod caches to sync")
}
glog.Infof("Starting %v workers", threadiness)
// Launch workers to process Caffe2Job resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller)
|
() bool {
key, quit := c.workQueue.Get()
if quit {
return false
}
defer c.workQueue.Done(key)
forget, err := c.syncHandler(key.(string))
if err == nil {
if forget {
c.workQueue.Forget(key)
}
return true
}
utilruntime.HandleError(fmt.Errorf("Error syncing job: %v", err))
c.workQueue.AddRateLimited(key)
return true
}
// syncCaffe2Job will sync the job with the given key. This function is not meant to be invoked
// concurrently with the same key.
//
// When a job is completely processed it will return true, indicating that it's OK to forget about this job since
// no more processing will occur for it.
func (c *Controller) syncCaffe2Job(key string) (bool, error) {
startTime := time.Now()
defer func() {
glog.V(4).Infof("Finished syncing job %q (%v)", key, time.Since(startTime))
}()
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return false, err
}
if len(ns) == 0 || len(name) ==
|
processNextWorkItem
|
identifier_name
|
featureFileReader.js
|
permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import FeatureParser from "./featureParser.js"
import SegParser from "./segParser.js"
import VcfParser from "../variant/vcfParser.js"
import {BGZip, FileUtils, igvxhr, URIUtils} from "../../node_modules/igv-utils/src/index.js"
import {buildOptions, isDataURL} from "../util/igvUtils.js"
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader() {
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
}
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files: features are temporarily cached when the header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async
|
(chr, start, end) {
// ensure that the header has been loaded -- the tabix _blockLoader is initialized as a side effect
if(!this.dataURI && !this.header) {
await this.readHeader()
}
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter pseudo-features (e.g. created mates for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this.config
|
loadFeaturesWithIndex
|
identifier_name
|
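loadFeaturesWithIndex above keeps features that overlap [start, end] and also keeps one flanking feature on each side so rendering at the edges has context. A hedged sketch of just that overlap-plus-flank filter, with a hypothetical Feature type (plain Go, not the igv.js API):

package main

import "fmt"

// Feature is a hypothetical stand-in for a parsed feature record.
type Feature struct {
	Chr   string
	Start int
	End   int
}

// overlapping keeps features on chr that overlap [start, end], plus the one
// feature immediately before and the first one beyond the interval, echoing
// the flanking-feature handling in loadFeaturesWithIndex.
func overlapping(features []Feature, chr string, start, end int) []Feature {
	var out []Feature
	inInterval := false
	for i, f := range features {
		if f.Chr != chr {
			continue
		}
		if f.Start > end {
			out = append(out, f) // first feature beyond the interval
			break
		}
		if f.End >= start && f.Start <= end {
			if !inInterval && i > 0 {
				out = append(out, features[i-1]) // feature just before the interval
			}
			inInterval = true
			out = append(out, f)
		}
	}
	return out
}

func main() {
	feats := []Feature{
		{"chr1", 100, 200}, {"chr1", 300, 400}, {"chr1", 500, 600}, {"chr1", 700, 800},
	}
	for _, f := range overlapping(feats, "chr1", 350, 550) {
		fmt.Println(f.Start, f.End)
	}
	// Prints 100 200 (left flank), 300 400, 500 600, 700 800 (right flank).
}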
featureFileReader.js
|
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader() {
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
}
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files: features are temporarily cached when the header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async loadFeaturesWithIndex(chr, start, end) {
// ensure that the header has been loaded -- the tabix _blockLoader is initialized as a side effect
if(!this.dataURI && !this.header) {
await this.readHeader()
}
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter pseudo-features (e.g. created mates for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this.config.indexURL) {
this.index = await this.loadIndex()
return this.index
}
}
/**
* Return a Promise for the async loaded index
*/
async loadIndex() {
const indexURL = this.config.indexURL
return loadIndex(indexURL, this.config, this.genome)
}
async loadFeaturesFromDataURI() {
if (this.features) {
// An optimization hack for non-indexed files: features are temporarily cached when the header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const plain = BGZip.decodeDataURI(this.dataURI)
let dataWrapper = getDataWrapper(plain)
this.header = await this.parser.parseHeader(dataWrapper)
if (typeof this.header === "string" && this.header.startsWith("##gff-version 3")) {
this.format = 'gff3'
}
dataWrapper = getDataWrapper(plain)
this.features = await this.parser.parseFeatures(dataWrapper)
return this.features
|
}
}
}
|
random_line_split
|
|
featureFileReader.js
|
permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import FeatureParser from "./featureParser.js"
import SegParser from "./segParser.js"
import VcfParser from "../variant/vcfParser.js"
import {BGZip, FileUtils, igvxhr, URIUtils} from "../../node_modules/igv-utils/src/index.js"
import {buildOptions, isDataURL} from "../util/igvUtils.js"
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
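// A minimal usage sketch for the reader described above (illustrative only; the URLs and
// format below are hypothetical, not taken from this file):
//
//   const reader = new FeatureFileReader({
//       url: "https://example.org/features.bed.gz",          // hypothetical
//       indexURL: "https://example.org/features.bed.gz.tbi",  // hypothetical
//       format: "bed"
//   }, genome)
//   await reader.readHeader()
//   const features = await reader.readFeatures("chr1", 0, 1000000)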
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
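// Rough worked example of the heuristic above (illustrative numbers only): if the indexed
// chromosomes total gl = 3.0e9 bp and index.lastBlockPosition = 3.0e8, the suggested window is
// round((3.0e9 / 3.0e8) * 10000) = 100,000 bp -- sparser files get proportionally larger windows.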
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader()
|
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
}
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files: features are temporarily cached when the header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async loadFeaturesWithIndex(chr, start, end) {
// ensure that the header has been loaded -- the tabix _blockLoader is initialized as a side effect
if(!this.dataURI && !this.header) {
await this.readHeader()
}
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter pseudo-features (e.g. mates created for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this.config.index
|
{
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
|
identifier_body
|
featureFileReader.js
|
permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import FeatureParser from "./featureParser.js"
import SegParser from "./segParser.js"
import VcfParser from "../variant/vcfParser.js"
import {BGZip, FileUtils, igvxhr, URIUtils} from "../../node_modules/igv-utils/src/index.js"
import {buildOptions, isDataURL} from "../util/igvUtils.js"
import GWASParser from "../gwas/gwasParser.js"
import AEDParser from "../aed/AEDParser.js"
import {loadIndex} from "../bam/indexFactory.js"
import getDataWrapper from "./dataWrapper.js"
import BGZLineReader from "../util/bgzLineReader.js"
import BGZBlockLoader from "../bam/bgzBlockLoader.js"
/**
* Reader for "bed like" files (tab delimited files with 1 feature per line: bed, gff, vcf, etc)
*
* @param config
* @constructor
*/
class FeatureFileReader {
constructor(config, genome) {
var uriParts
this.config = config || {}
this.genome = genome
this.indexURL = config.indexURL
this.indexed = config.indexed || this.indexURL !== undefined
this.queryable = this.indexed
if (FileUtils.isFile(this.config.url)) {
this.filename = this.config.url.name
} else if (isDataURL(this.config.url)) {
this.indexed = false // by definition
this.dataURI = config.url
} else {
uriParts = URIUtils.parseUri(this.config.url)
this.filename = config.filename || uriParts.file
}
this.parser = this.getParser(this.config)
if (this.config.format === "vcf" && !this.config.indexURL) {
console.warn("Warning: index file not specified. The entire vcf file will be loaded.")
}
}
async defaultVisibilityWindow() {
if (this.config.indexURL) {
const index = await this.getIndex()
if (index && index.lastBlockPosition) {
let gl = 0
const s = 10000
for (let c of index.chromosomeNames) {
const chromosome = this.genome.getChromosome(c)
if (chromosome) {
gl += chromosome.bpLength
}
}
return Math.round((gl / index.lastBlockPosition) * s)
}
}
}
/**
* Return a promise to load features for the genomic interval
* @param chr
* @param start
* @param end
*/
async readFeatures(chr, start, end) {
const index = await this.getIndex()
if (index) {
this.indexed = true
return this.loadFeaturesWithIndex(chr, start, end)
} else if (this.dataURI) {
this.indexed = false
return this.loadFeaturesFromDataURI()
} else {
this.indexed = false
return this.loadFeaturesNoIndex()
}
}
async readHeader() {
if (this.dataURI) {
await this.loadFeaturesFromDataURI(this.dataURI)
return this.header
} else {
if (this.config.indexURL) {
const index = await this.getIndex()
if (!index) {
// Note - it should be impossible to get here
throw new Error("Unable to load index: " + this.config.indexURL)
}
let dataWrapper
if (index.tabix) {
this._blockLoader = new BGZBlockLoader(this.config);
dataWrapper = new BGZLineReader(this.config)
} else {
// Tribble
const maxSize = Object.values(index.chrIndex)
.flatMap(chr => chr.blocks)
.map(block => block.max)
.reduce((previous, current) =>
Math.min(previous, current), Number.MAX_SAFE_INTEGER)
const options = buildOptions(this.config, {bgz: index.tabix, range: {start: 0, size: maxSize}})
const data = await igvxhr.loadString(this.config.url, options)
dataWrapper = getDataWrapper(data)
}
this.header = await this.parser.parseHeader(dataWrapper) // Cache header, might be needed to parse features
return this.header
} else {
// If this is a non-indexed file we will load all features in advance
const options = buildOptions(this.config)
const data = await igvxhr.loadString(this.config.url, options)
let dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
// Reset data wrapper and parse features
dataWrapper = getDataWrapper(data)
this.features = await this.parser.parseFeatures(dataWrapper) // cache features
return this.header
}
}
}
getParser(config) {
switch (config.format) {
case "vcf":
return new VcfParser(config)
case "seg" :
return new SegParser("seg")
case "mut":
return new SegParser("mut")
case "maf":
return new SegParser("maf")
case "gwas" :
return new GWASParser(config)
case "aed" :
return new AEDParser(config)
default:
return new FeatureParser(config)
}
}
async loadFeaturesNoIndex() {
if (this.features) {
// An optimization hack for non-indexed files: features are temporarily cached when the header is read.
const tmp = this.features
delete this.features
return tmp
} else {
const options = buildOptions(this.config) // Add oauth token, if any
const data = await igvxhr.loadString(this.config.url, options)
if (!this.header) {
const dataWrapper = getDataWrapper(data)
this.header = await this.parser.parseHeader(dataWrapper)
}
const dataWrapper = getDataWrapper(data)
const features = await this.parser.parseFeatures(dataWrapper) // <= PARSING DONE HERE
return features
}
}
async loadFeaturesWithIndex(chr, start, end) {
// ensure that the header has been loaded -- the tabix _blockLoader is initialized as a side effect
if(!this.dataURI && !this.header)
|
//console.log("Using index"
const config = this.config
const parser = this.parser
const tabix = this.index.tabix
const refId = tabix ? this.index.sequenceIndexMap[chr] : chr
if (refId === undefined) {
return []
}
const genome = this.genome
const chunks = this.index.chunksForRange(refId, start, end)
if (!chunks || chunks.length === 0) {
return []
} else {
const allFeatures = []
for (let chunk of chunks) {
let inflated
if (tabix) {
inflated = await this._blockLoader.getData(chunk.minv, chunk.maxv)
} else {
const options = buildOptions(config, {
range: {
start: chunk.minv.block,
size: chunk.maxv.block - chunk.minv.block + 1
}
})
inflated = await igvxhr.loadString(config.url, options)
}
const slicedData = chunk.minv.offset ? inflated.slice(chunk.minv.offset) : inflated
const dataWrapper = getDataWrapper(slicedData)
let slicedFeatures = await parser.parseFeatures(dataWrapper)
// Filter pseudo-features (e.g. mates created for VCF SV records)
slicedFeatures = slicedFeatures.filter(f => f._f === undefined)
// Filter features not in requested range.
let inInterval = false
for (let i = 0; i < slicedFeatures.length; i++) {
const f = slicedFeatures[i]
const canonicalChromosome = genome ? genome.getChromosomeName(f.chr) : f.chr
if (canonicalChromosome !== chr) {
if (allFeatures.length === 0) {
continue //adjacent chr to the left
} else {
break //adjacent chr to the right
}
}
if (f.start > end) {
allFeatures.push(f) // First feature beyond interval
break
}
if (f.end >= start && f.start <= end) {
if (!inInterval) {
inInterval = true
if (i > 0) {
allFeatures.push(slicedFeatures[i - 1])
} else {
// TODO -- get block before this one for first feature;
}
}
allFeatures.push(f)
}
}
}
allFeatures.sort(function (a, b) {
return a.start - b.start
})
return allFeatures
}
}
async getIndex() {
if (this.index) {
return this.index
} else if (this
|
{
await this.readHeader()
}
|
conditional_block
|
client.go
|
.URL.Query()
q.Set(k, v)
r.URL.RawQuery = q.Encode()
}
}
func TypeFilter(t MetricType) Filter {
return Param("type", t.shortForm())
}
func TagsFilter(t map[string]string) Filter {
j := tagsEncoder(t)
return Param("tags", j)
}
// Requires HWKMETRICS-233
func IdFilter(regexp string) Filter {
return Param("id", regexp)
}
func StartTimeFilter(duration time.Duration) Filter {
return Param("start", strconv.Itoa(int(duration)))
}
func EndTimeFilter(duration time.Duration) Filter {
return Param("end", strconv.Itoa(int(duration)))
}
func BucketsFilter(buckets int) Filter {
return Param("buckets", strconv.Itoa(buckets))
}
// The Send method
func (self *Client) createRequest() *http.Request {
req := &http.Request{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: self.url.Host,
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Hawkular-Tenant", self.Tenant)
return req
}
func (self *Client) Send(o ...Modifier) (*http.Response, error) {
// Initialize
r := self.createRequest()
// Run all the modifiers
for _, f := range o {
err := f(r)
if err != nil {
return nil, err
}
}
return self.client.Do(r)
}
// Commands
func prepend(slice []Modifier, a ...Modifier) []Modifier {
p := make([]Modifier, 0, len(slice)+len(a))
p = append(p, a...)
p = append(p, slice...)
return p
}
// Create new Definition
func (self *Client) Create(md MetricDefinition, o ...Modifier) (bool, error) {
// Keep the order, add custom prepend
o = prepend(o, self.Url("POST", TypeEndpoint(md.Type)), Data(md))
r, err := self.Send(o...)
if err != nil {
return false, err
}
defer r.Body.Close()
if r.StatusCode > 399 {
err = self.parseErrorResponse(r)
if err, ok := err.(*HawkularClientError); ok {
if err.Code != http.StatusConflict {
return false, err
} else {
return false, nil
}
}
return false, err
}
return true, nil
}
// Fetch definitions
func (self *Client) Definitions(o ...Modifier) ([]*MetricDefinition, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(Generic)))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
md := []*MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return md, err
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Update tags
func (self *Client) UpdateTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("PUT", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()), Data(tags))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Delete given tags from the definition
func (self *Client) DeleteTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("DELETE", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint(), TagsEndpoint(tags)))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Fetch metric definition tags
func (self *Client) Tags(t MetricType, id string, o ...Modifier) (map[string]string, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and split by type & tenant.
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// TODO: handle GaugeBucketpoint and the other bucket point types as well
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == ""
|
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
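// A minimal usage sketch of the constructor above (illustrative only: the tenant and host
// values are hypothetical, error handling is elided, and Parameters may carry more fields
// than the ones shown here):
//
//	client, _ := NewHawkularClient(Parameters{Tenant: "example-tenant", Host: "localhost:8080"})
//	definitions, _ := client.Definitions()
//	tags, _ := client.Tags(Gauge, "example.metric.id")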
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return
|
{
p.Path = base_url
}
|
conditional_block
|
client.go
|
r.URL.Query()
q.Set(k, v)
r.URL.RawQuery = q.Encode()
}
}
func TypeFilter(t MetricType) Filter {
return Param("type", t.shortForm())
}
func TagsFilter(t map[string]string) Filter {
j := tagsEncoder(t)
return Param("tags", j)
}
// Requires HWKMETRICS-233
func IdFilter(regexp string) Filter {
return Param("id", regexp)
}
func StartTimeFilter(duration time.Duration) Filter {
return Param("start", strconv.Itoa(int(duration)))
}
func EndTimeFilter(duration time.Duration) Filter {
return Param("end", strconv.Itoa(int(duration)))
}
func BucketsFilter(buckets int) Filter {
return Param("buckets", strconv.Itoa(buckets))
}
// The Send method
func (self *Client) createRequest() *http.Request {
req := &http.Request{
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Host: self.url.Host,
}
req.Header.Add("Content-Type", "application/json")
req.Header.Add("Hawkular-Tenant", self.Tenant)
return req
}
func (self *Client) Send(o ...Modifier) (*http.Response, error) {
// Initialize
r := self.createRequest()
// Run all the modifiers
for _, f := range o {
err := f(r)
if err != nil {
return nil, err
}
}
return self.client.Do(r)
}
// Commands
func prepend(slice []Modifier, a ...Modifier) []Modifier {
p := make([]Modifier, 0, len(slice)+len(a))
p = append(p, a...)
p = append(p, slice...)
return p
}
// Create new Definition
func (self *Client) Create(md MetricDefinition, o ...Modifier) (bool, error) {
// Keep the order, add custom prepend
o = prepend(o, self.Url("POST", TypeEndpoint(md.Type)), Data(md))
r, err := self.Send(o...)
if err != nil {
return false, err
}
defer r.Body.Close()
if r.StatusCode > 399 {
err = self.parseErrorResponse(r)
if err, ok := err.(*HawkularClientError); ok {
if err.Code != http.StatusConflict {
return false, err
} else {
return false, nil
}
}
return false, err
}
return true, nil
}
// Fetch definitions
func (self *Client) Definitions(o ...Modifier) ([]*MetricDefinition, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(Generic)))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
|
md := []*MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return md, err
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Update tags
func (self *Client) UpdateTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("PUT", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()), Data(tags))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Delete given tags from the definition
func (self *Client) DeleteTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("DELETE", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint(), TagsEndpoint(tags)))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Fetch metric definition tags
func (self *Client) Tags(t MetricType, id string, o ...Modifier) (map[string]string, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and split by type & tenant.
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// TODO: handle GaugeBucketpoint and the other bucket point types as well
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == "" {
p.Path = base_url
}
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return nil
|
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
|
random_line_split
|
client.go
|
{
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and split by type & tenant.
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// TODO: handle GaugeBucketpoint and the other bucket point types as well
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == "" {
p.Path = base_url
}
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return nil, err
}
}
return metrics, nil
}
// HTTP Helper functions
func cleanId(id string) string {
return url.QueryEscape(id)
}
// Override default http.NewRequest to avoid url.Parse which has a bug (removes valid %2F)
func (self *Client) newRequest(url *url.URL, method string, body io.Reader) (*http.Request, error) {
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
req := &http.Request{
Method: method,
URL: url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Body: rc,
Host: url.Host,
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
case *bytes.Reader:
req.ContentLength = int64(v.Len())
case *strings.Reader:
req.ContentLength = int64(v.Len())
}
}
return req, nil
}
// Helper function that transforms struct to json and fetches the correct tenant information
// TODO: Try the decorator pattern to replace all these simple functions?
func (self *Client) process(url *url.URL, method string, data interface{}) ([]byte, error) {
jsonb, err := json.Marshal(&data)
if err != nil {
return nil, err
}
return self.send(url, method, jsonb)
}
func (self *Client) send(url *url.URL, method string, json []byte) ([]byte, error) {
// Have to replicate http.NewRequest here to avoid calling url.Parse,
// which has a bug when it comes to encoded URLs
req, _ := self.newRequest(url, method, bytes.NewBuffer(json))
req.Header.Add("Content-Type", "application/json")
// if len(tenant) > 0 {
// req.Header.Add("Hawkular-Tenant", tenant)
// } else {
req.Header.Add("Hawkular-Tenant", self.Tenant)
// }
// fmt.Printf("curl -X %s -H 'Hawkular-Tenant: %s' %s\n", req.Method, req.Header.Get("Hawkular-Tenant"), req.URL)
resp, err := self.client.Do(req)
// fmt.Printf("%s\n", resp.Header.Get("Content-Length"))
// fmt.Printf("%d\n", resp.StatusCode)
if err != nil {
return nil, err
}
// fmt.Printf("Received bytes: %d\n", resp.ContentLength)
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(resp.Body)
return b, err
} else if resp.StatusCode > 399 {
return nil, self.parseErrorResponse(resp)
} else {
return nil, nil // Nothing to answer..
}
}
func (self *Client) parseErrorResponse(resp *http.Response) error {
// Parse error messages here correctly..
reply, err := ioutil.ReadAll(resp.Body)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be read: %s", err.Error()),
}
}
details := &HawkularError{}
err = json.Unmarshal(reply, details)
if err != nil {
return &HawkularClientError{Code: resp.StatusCode,
msg: fmt.Sprintf("Reply could not be parsed: %s", err.Error()),
}
}
return &HawkularClientError{Code: resp.StatusCode,
msg: details.ErrorMsg,
}
}
// URL functions (...)
type Endpoint func(u *url.URL)
func (self *Client) createUrl(e ...Endpoint) *url.URL {
mu := *self.url
for _, f := range e {
f(&mu)
}
return &mu
}
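// Sketch of how the Endpoint closures below compose (illustrative only; "example.id" is a
// hypothetical metric id): each Endpoint appends one path segment to a copy of the base URL, so
//
//	u := self.createUrl(TypeEndpoint(Gauge), SingleMetricEndpoint("example.id"), DataEndpoint())
//
// would address the datapoints resource of a single gauge metric.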
func TypeEndpoint(t MetricType) Endpoint {
return func(u *url.URL) {
addToUrl(u, t.String())
}
}
func SingleMetricEndpoint(id string) Endpoint {
return func(u *url.URL) {
addToUrl(u, url.QueryEscape(id))
}
}
func TagEndpoint() Endpoint
|
{
return func(u *url.URL) {
addToUrl(u, "tags")
}
}
|
identifier_body
|
|
client.go
|
err := f(r)
if err != nil {
return nil, err
}
}
return self.client.Do(r)
}
// Commands
func prepend(slice []Modifier, a ...Modifier) []Modifier {
p := make([]Modifier, 0, len(slice)+len(a))
p = append(p, a...)
p = append(p, slice...)
return p
}
// Create new Definition
func (self *Client) Create(md MetricDefinition, o ...Modifier) (bool, error) {
// Keep the order, add custom prepend
o = prepend(o, self.Url("POST", TypeEndpoint(md.Type)), Data(md))
r, err := self.Send(o...)
if err != nil {
return false, err
}
defer r.Body.Close()
if r.StatusCode > 399 {
err = self.parseErrorResponse(r)
if err, ok := err.(*HawkularClientError); ok {
if err.Code != http.StatusConflict {
return false, err
} else {
return false, nil
}
}
return false, err
}
return true, nil
}
// Fetch definitions
func (self *Client) Definitions(o ...Modifier) ([]*MetricDefinition, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(Generic)))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
md := []*MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return md, err
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Update tags
func (self *Client) UpdateTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("PUT", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()), Data(tags))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Delete given tags from the definition
func (self *Client) DeleteTags(t MetricType, id string, tags map[string]string, o ...Modifier) error {
o = prepend(o, self.Url("DELETE", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint(), TagsEndpoint(tags)))
r, err := self.Send(o...)
if err != nil {
return err
}
defer r.Body.Close()
if r.StatusCode > 399 {
return self.parseErrorResponse(r)
}
return nil
}
// Fetch metric definition tags
func (self *Client) Tags(t MetricType, id string, o ...Modifier) (map[string]string, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), TagEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
tags := make(map[string]string)
if b != nil {
if err = json.Unmarshal(b, &tags); err != nil {
return nil, err
}
}
return tags, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Write datapoints to the server
func (self *Client) Write(metrics []MetricHeader, o ...Modifier) error {
if len(metrics) > 0 {
mHs := make(map[MetricType][]MetricHeader)
for _, m := range metrics {
if _, found := mHs[m.Type]; !found {
mHs[m.Type] = make([]MetricHeader, 0, 1)
}
mHs[m.Type] = append(mHs[m.Type], m)
}
wg := &sync.WaitGroup{}
errorsChan := make(chan error, len(mHs))
for k, v := range mHs {
wg.Add(1)
go func(k MetricType, v []MetricHeader) {
defer wg.Done()
// Should be sorted and split by type & tenant.
on := o
on = prepend(on, self.Url("POST", TypeEndpoint(k), DataEndpoint()), Data(v))
r, err := self.Send(on...)
if err != nil {
errorsChan <- err
return
}
defer r.Body.Close()
if r.StatusCode > 399 {
errorsChan <- self.parseErrorResponse(r)
}
}(k, v)
}
wg.Wait()
select {
case err, ok := <-errorsChan:
if ok {
return err
}
// If channel is closed, we're done
default:
// Nothing to do
}
}
return nil
}
// Read data from the server
func (self *Client) ReadMetric(t MetricType, id string, o ...Modifier) ([]*Datapoint, error) {
o = prepend(o, self.Url("GET", TypeEndpoint(t), SingleMetricEndpoint(id), DataEndpoint()))
r, err := self.Send(o...)
if err != nil {
return nil, err
}
defer r.Body.Close()
if r.StatusCode == http.StatusOK {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
// TODO: handle GaugeBucketpoint and the other bucket point types as well
dp := []*Datapoint{}
if b != nil {
if err = json.Unmarshal(b, &dp); err != nil {
return nil, err
}
}
return dp, nil
} else if r.StatusCode > 399 {
return nil, self.parseErrorResponse(r)
}
return nil, nil
}
// Initialization
func NewHawkularClient(p Parameters) (*Client, error) {
if p.Path == "" {
p.Path = base_url
}
u := &url.URL{
Host: p.Host,
Path: p.Path,
Scheme: "http",
Opaque: fmt.Sprintf("//%s/%s", p.Host, p.Path),
}
return &Client{
url: u,
Tenant: p.Tenant,
client: &http.Client{
Timeout: timeout,
},
}, nil
}
// Public functions
// Older functions..
// Return a single definition
func (self *Client) Definition(t MetricType, id string) (*MetricDefinition, error) {
url := self.singleMetricsUrl(t, id)
b, err := self.process(url, "GET", nil)
if err != nil {
return nil, err
}
md := MetricDefinition{}
if b != nil {
if err = json.Unmarshal(b, &md); err != nil {
return nil, err
}
}
return &md, nil
}
// Read single Gauge metric's datapoints.
// TODO: Remove and replace with better Read properties? Perhaps with iterators?
func (self *Client) SingleGaugeMetric(id string, options map[string]string) ([]*Datapoint, error) {
id = cleanId(id)
u := self.paramUrl(self.dataUrl(self.singleMetricsUrl(Gauge, id)), options)
// fmt.Printf("Receiving for %s, from: %s\n", self.Tenant, u)
b, err := self.process(u, "GET", nil)
if err != nil {
return nil, err
}
metrics := []*Datapoint{}
if b != nil {
// fmt.Printf("Received: %s\n", string(b))
if err = json.Unmarshal(b, &metrics); err != nil {
return nil, err
}
}
return metrics, nil
}
// HTTP Helper functions
func cleanId(id string) string {
return url.QueryEscape(id)
}
// Override default http.NewRequest to avoid url.Parse which has a bug (removes valid %2F)
func (self *Client) newRequest(url *url.URL, method string, body io.Reader) (*http.Request, error) {
rc, ok := body.(io.ReadCloser)
if !ok && body != nil {
rc = ioutil.NopCloser(body)
}
req := &http.Request{
Method: method,
URL: url,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Header: make(http.Header),
Body: rc,
Host: url.Host,
}
if body != nil {
switch v := body.(type) {
case *bytes.Buffer:
req.ContentLength = int64(v.Len())
case *bytes.Reader:
req.ContentLength = int64(v.Len())
case *strings.Reader:
req.ContentLength = int64(v.Len())
}
}
return req, nil
}
// Helper function that transforms struct to json and fetches the correct tenant information
// TODO: Try the decorator pattern to replace all these simple functions?
func (self *Client)
|
process
|
identifier_name
|
|
ifd.rs
|
1
TileOffsets 324; // 144 LONG TilesPerImage
TileByteCounts 325; // 145 SHORT or LONG TilesPerImage
InkSet 332; // 14C SHORT 1
InkNames 333; // 14D ASCII t
NumberOfInks 334; // 14E SHORT 1
DotRange 336; // 150 BYTE or SHORT 2, or 2*
TargetPrinter 337; // 151 ASCII any
ExtraSamples 338; // 152 BYTE number of extra compo
SampleFormat 339; // 153 SHORT SamplesPerPixel
SMinSampleValue 340; // 154 Any SamplesPerPixel
SMaxSampleValue 341; // 155 Any SamplesPerPixel
TransferRange 342; // 156 SHORT 6
JPEGProc 512; // 200 SHORT 1
JPEGInterchangeFormat 513; // 201 LONG 1
JPEGInterchangeFormatLngth 514; // 202 LONG 1
JPEGRestartInterval 515; // 203 SHORT 1
JPEGLosslessPredictors 517; // 205 SHORT SamplesPerPixel
JPEGPointTransforms 518; // 206 SHORT SamplesPerPixel
JPEGQTables 519; // 207 LONG SamplesPerPixel
JPEGDCTables 520; // 208 LONG SamplesPerPixel
JPEGACTables 521; // 209 LONG SamplesPerPixel
YCbCrCoefficients 529; // 211 RATIONAL 3
YCbCrSubSampling 530; // 212 SHORT 2
YCbCrPositioning 531; // 213 SHORT 1
ReferenceBlackWhite 532; // 214 LONG 2*SamplesPerPixel
Copyright 33432; // 8298 ASCII Any
}
// Note: These tags appear in the order they are mentioned in the TIFF reference
// https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
// tags!{
// // Baseline tags:
// Artist 315; // TODO add support
// // grayscale images PhotometricInterpretation 1 or 3
// BitsPerSample 258;
// CellLength 265; // TODO add support
// CellWidth 264; // TODO add support
// // palette-color images (PhotometricInterpretation 3)
// ColorMap 320; // TODO add support
// Compression 259; // TODO add support for 2 and 32773
// Copyright 33432; // TODO add support
// DateTime 306; // TODO add support
// ExtraSamples 338; // TODO add support
// FillOrder 266; // TODO add support
// FreeByteCounts 289; // TODO add support
// FreeOffsets 288; // TODO add support
// GrayResponseCurve 291; // TODO add support
// GrayResponseUnit 290; // TODO add support
// HostComputer 316; // TODO add support
// ImageDescription 270; // TODO add support
// ImageLength 257;
// ImageWidth 256;
// Make 271; // TODO add support
// MaxSampleValue 281; // TODO add support
// MinSampleValue 280; // TODO add support
// Model 272; // TODO add support
// NewSubfileType 254; // TODO add support
// Orientation 274; // TODO add support
// PhotometricInterpretation 262;
// PlanarConfiguration 284;
// ResolutionUnit 296; // TODO add support
// RowsPerStrip 278;
// SamplesPerPixel 277;
// Software 305;
// StripByteCounts 279;
// StripOffsets 273;
// SubfileType 255; // TODO add support
// Threshholding 263; // TODO add support
// XResolution 282;
// YResolution 283;
// // Advanced tags
// Predictor 317;
// // TIFF Extensions
// // Section 11 CCITT Bilevel Encodings
// // Compression
// T4Options 292;
// T6Options 293;
// // Section 12 Document Storage and Retrieval
// DocumentName 269;
// PageName 285;
// PageNumber 297;
// XPosition 286;
// YPosition 287;
// // Section 13: LZW Compression
// // Section 14: Differencing Predictor
// // Section 15: Tiled Images -- Do not use both strip-oriented and tile-oriented fields in the same TIFF file
// TileWidth 322;
// TileLength 323;
// TileOffsets 324;
// TileByteCounts 325;
// // Section 16: CMYK Images
// InkSet 332;
// NumberOfInks 334;
// InkNames 333;
// DotRange 336;
// TargetPrinter 337;
// // Section 17: HalftoneHints
// HalftoneHints 321;
// // Section 18: Associated Alpha Handling
// ExtraSamples 338;
// // Section 19: Data Sample Format
// SampleFormat 339;
// SMinSampleValue 340;
// SMaxSampleValue 341;
// // Section 20: RGB Image Colorimetry
// WhitePoint 318;
// PrimaryChromaticities 319;
// TransferFunction 301;
// TransferRange 342;
// ReferenceBlackWhite 532;
// // Section 21: YCbCr Images
// }
enum_from_primitive! {
#[derive(Clone, Copy, Debug)]
pub enum Type {
BYTE = 1,
ASCII = 2,
SHORT = 3,
LONG = 4,
RATIONAL = 5,
SBYTE = 6,
UNDEFINED = 7,
SSHORT = 8,
SLONG = 9,
SRATIONAL = 10,
FLOAT = 11,
DOUBLE = 12,
}
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value {
//Signed(i32),
Unsigned(u32),
List(Vec<Value>)
}
#[allow(unused_qualifications)]
#[derive(Debug)]
pub enum Value_Type {
Value,
Offset
}
impl Value {
pub fn as_u32(self) -> ::image::ImageResult<u32> {
match self {
Unsigned(val) => Ok(val),
val => Err(::image::ImageError::FormatError(format!(
"Expected unsigned integer, {:?} found.", val
)))
}
}
pub fn as_u32_vec(self) -> ::image::ImageResult<Vec<u32>> {
match self {
List(vec) => {
let mut new_vec = Vec::with_capacity(vec.len());
for v in vec.into_iter() {
new_vec.push(try!(v.as_u32()))
}
Ok(new_vec)
},
Unsigned(val) => Ok(vec![val]),
//_ => Err(::image::FormatError("Tag data malformed.".to_string()))
}
}
}
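// A small illustrative sketch (not part of this file) of how as_u32_vec flattens values:
//
//   let bits = Value::List(vec![Value::Unsigned(8), Value::Unsigned(8), Value::Unsigned(8)]);
//   assert_eq!(bits.as_u32_vec().unwrap(), vec![8, 8, 8]);
//
// A plain Value::Unsigned(n) yields vec![n], which is why single-valued and multi-valued
// tags can be read through the same code path.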
pub struct Entry {
type_: Type,
count: u32,
offset: [u8; 4]
}
impl ::std::fmt::Debug for Entry {
fn fmt(&self, fmt: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
fmt.write_str(&format!("Entry {{ type_: {:?}, count: {:?}, offset: {:?} }}",
self.type_,
self.count,
&self.offset,
// String::from_utf8_lossy ( &self.offset ),
))
}
}
impl Entry {
pub fn
|
new
|
identifier_name
|
|
ifd.rs
|
11 SHORT or LONG StripsPerImage
Orientation 274; // 112 SHORT 1
SamplesPerPixel 277; // 115 SHORT 1
RowsPerStrip 278; // 116 SHORT or LONG 1
StripByteCounts 279; // 117 LONG or SHORT StripsPerImage
MinSampleValue 280; // 118 SHORT SamplesPerPixel
MaxSampleValue 281; // 119 SHORT SamplesPerPixel
XResolution 282; // 11A RATIONAL 1
YResolution 283; // 11B RATIONAL 1
PlanarConfiguration 284; // 11C SHORT 1
PageName 285; // 11D ASCII
XPosition 286; // 11E RATIONAL
YPosition 287; // 11F RATIONAL
FreeOffsets 288; // 120 LONG
FreeByteCounts 289; // 121 LONG
GrayResponseUnit 290; // 122 SHORT
GrayResponseCurve 291; // 123 SHORT 2**BitsPerSample
T4Options 292; // 124 LONG 1
T6Options 293; // 125 LONG 1
ResolutionUnit 296; // 128 SHORT 1
PageNumber 297; // 129 SHORT 2
TransferFunction 301; // 12D SHORT
Software 305; // 131 ASCII
DateTime 306; // 132 ASCII 20
Artist 315; // 13B ASCII
HostComputer 316; // 13C ASCII
Predictor 317; // 13D SHORT 1
WhitePoint 318; // 13E RATIONAL 2
PrimaryChromaticities 319; // 13F RATIONAL 6
ColorMap 320; // 140 SHORT 3 * (2**BitsPerSample)
HalftoneHints 321; // 141 SHORT 2
TileWidth 322; // 142 SHORT or LONG 1
TileLength 323; // 143 SHORT or LONG 1
TileOffsets 324; // 144 LONG TilesPerImage
TileByteCounts 325; // 145 SHORT or LONG TilesPerImage
InkSet 332; // 14C SHORT 1
InkNames 333; // 14D ASCII t
NumberOfInks 334; // 14E SHORT 1
DotRange 336; // 150 BYTE or SHORT 2, or 2*
TargetPrinter 337; // 151 ASCII any
ExtraSamples 338; // 152 BYTE number of extra compo
SampleFormat 339; // 153 SHORT SamplesPerPixel
SMinSampleValue 340; // 154 Any SamplesPerPixel
SMaxSampleValue 341; // 155 Any SamplesPerPixel
TransferRange 342; // 156 SHORT 6
JPEGProc 512; // 200 SHORT 1
JPEGInterchangeFormat 513; // 201 LONG 1
JPEGInterchangeFormatLngth 514; // 202 LONG 1
JPEGRestartInterval 515; // 203 SHORT 1
JPEGLosslessPredictors 517; // 205 SHORT SamplesPerPixel
JPEGPointTransforms 518; // 206 SHORT SamplesPerPixel
JPEGQTables 519; // 207 LONG SamplesPerPixel
JPEGDCTables 520; // 208 LONG SamplesPerPixel
JPEGACTables 521; // 209 LONG SamplesPerPixel
YCbCrCoefficients 529; // 211 RATIONAL 3
YCbCrSubSampling 530; // 212 SHORT 2
YCbCrPositioning 531; // 213 SHORT 1
ReferenceBlackWhite 532; // 214 LONG 2*SamplesPerPixel
Copyright 33432; // 8298 ASCII Any
}
|
// tags!{
// // Baseline tags:
// Artist 315; // TODO add support
// // grayscale images PhotometricInterpretation 1 or 3
// BitsPerSample 258;
// CellLength 265; // TODO add support
// CellWidth 264; // TODO add support
// // palette-color images (PhotometricInterpretation 3)
// ColorMap 320; // TODO add support
// Compression 259; // TODO add support for 2 and 32773
// Copyright 33432; // TODO add support
// DateTime 306; // TODO add support
// ExtraSamples 338; // TODO add support
// FillOrder 266; // TODO add support
// FreeByteCounts 289; // TODO add support
// FreeOffsets 288; // TODO add support
// GrayResponseCurve 291; // TODO add support
// GrayResponseUnit 290; // TODO add support
// HostComputer 316; // TODO add support
// ImageDescription 270; // TODO add support
// ImageLength 257;
// ImageWidth 256;
// Make 271; // TODO add support
// MaxSampleValue 281; // TODO add support
// MinSampleValue 280; // TODO add support
// Model 272; // TODO add support
// NewSubfileType 254; // TODO add support
// Orientation 274; // TODO add support
// PhotometricInterpretation 262;
// PlanarConfiguration 284;
// ResolutionUnit 296; // TODO add support
// RowsPerStrip 278;
// SamplesPerPixel 277;
// Software 305;
// StripByteCounts 279;
// StripOffsets 273;
// SubfileType 255; // TODO add support
// Threshholding 263; // TODO add support
// XResolution 282;
// YResolution 283;
// // Advanced tags
// Predictor 317;
// // TIFF Extensions
// // Section 11 CCITT Bilevel Encodings
// // Compression
// T4Options 292;
// T6Options 293;
// // Section 12 Document Storage and Retrieval
// DocumentName 269;
// PageName 285;
// PageNumber 297;
// XPosition 286;
// YPosition 287;
// // Section 13: LZW Compression
// // Section 14: Differencing Predictor
// // Section 15: Tiled Images -- Do not use both strip-oriented and tile-oriented fields in the same TIFF file
// TileWidth 322;
// TileLength 323;
// TileOffsets 324;
// TileByteCounts 325;
// // Section 16: CMYK Images
// InkSet 332;
// NumberOfInks 334;
// InkNames 333;
//
|
// Note: These tags appear in the order they are mentioned in the TIFF reference
// https://partners.adobe.com/public/developer/en/tiff/TIFF6.pdf
|
random_line_split
|
client.py
|
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
@staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def
|
(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
return 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param project_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True, performs 2 insert passes. On the first pass, if there is an error, Google returns "invalid" for the offending rows but inserts nothing (the remaining rows are marked "stopped").
So we filter out the "invalid" rows and do a 2nd pass.
Note that a BigQueryStreamingMaximumRowSizeExceeded error is never ignored.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were errors, the return value is a dict containing the response objects for the 2 insert passes performed: dict(response_pass1=..., response_pass2=..., counts=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'
|
__init__
|
identifier_name
|
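The client.py rows above and below wrap the BigQuery v2 API behind BigQueryClient, which picks its credentials per request: an explicit service-account JWT first, then App Engine, then a stored OAuth token file, then GCE metadata. A minimal construction sketch, assuming this module is importable as client; the read_key helper and all project/account names are hypothetical placeholders:

from client import BigQueryClient

def read_key():
    # Hypothetical helper: return the service-account private key bytes
    # consumed by SignedJwtAssertionCredentials.
    with open('service_account.pem', 'rb') as f:
        return f.read()

bq = BigQueryClient(
    use_jwt_credentials_auth=True,
    jwt_account_name='svc-account@example-project.iam.gserviceaccount.com',
    jwt_key_func=read_key,
    trace='my-debug-trace',  # optional; BigQueryModel adds it to every request's query params
)
print(bq.datasets().list(projectId='example-project').execute())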
client.py
|
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
@staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
|
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param project_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True, performs 2 insert passes. On the first pass, if there is an error, Google returns "invalid" for the offending rows but inserts nothing (the remaining rows are marked "stopped").
So we filter out the "invalid" rows and do a 2nd pass.
Note that a BigQueryStreamingMaximumRowSizeExceeded error is never ignored.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were errors, the return value is a dict containing the response objects for the 2 insert passes performed: dict(response_pass1=..., response_pass2=..., counts=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'source
|
return 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
|
identifier_body
|
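insert_rows (documented above) streams rows through tabledata().insertAll and can retry once without the rows BigQuery marked invalid. A usage sketch under the same assumptions as the earlier snippet (bq is a constructed BigQueryClient; the event_id field and all names are made up for illustration):

rows = [
    {'event_id': 'a1', 'user': 'alice', 'value': 3},
    {'event_id': 'a2', 'user': 'bob', 'value': 'not-an-int'},  # likely to come back as 'invalid'
]

result = bq.insert_rows(
    project_id='example-project',
    dataset_id='example_dataset',
    table_id='events',
    insert_id_generator=lambda row: row['event_id'],  # stable insertId for best-effort dedup
    rows=rows,
    ignore_invalid_rows=True,  # second pass re-sends only the rows that were not 'invalid'
)
# On errors with ignore_invalid_rows=True the result is
# dict(response_pass1=..., response_pass2=..., counts=...); otherwise it is the raw insertAll response.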
client.py
|
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
|
@staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
return 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param project_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True, performs 2 insert passes. On the first pass, if there is an error, Google returns "invalid" for the offending rows but inserts nothing (the remaining rows are marked "stopped").
So we filter out the "invalid" rows and do a 2nd pass.
Note that a BigQueryStreamingMaximumRowSizeExceeded error is never ignored.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were errors, the return value is a dict containing the response objects for the 2 insert passes performed: dict(response_pass1=..., response_pass2=..., counts=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'source
|
random_line_split
|
|
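create_table (shown in the rows above) builds the tableReference/schema body and calls tables().insert(), optionally swallowing BigQueryDuplicateError. A short sketch with a placeholder schema; the field dicts follow the BigQuery v2 schema format, and the expiration semantics noted below are an assumption:

fields = [
    {'name': 'event_id', 'type': 'STRING', 'mode': 'REQUIRED'},
    {'name': 'user', 'type': 'STRING', 'mode': 'NULLABLE'},
    {'name': 'value', 'type': 'INTEGER', 'mode': 'NULLABLE'},
    {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'},
]

bq.create_table(
    project_id='example-project',
    dataset_id='example_dataset',
    table_id='events',
    fields=fields,
    ignore_existing=True,        # don't fail if the table already exists
    friendly_name='Raw events',
    expiration=1735689600000,    # expirationTime, assumed to be milliseconds since the epoch
)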
client.py
|
query_params['trace'] = self.trace
return super(BigQueryModel, self).request(headers, path_params, query_params, body_value)
# pylint: disable=E1002
class BigQueryHttp(apiclient_request.HttpRequest):
"""Converts errors into BigQuery errors."""
def __init__(self, http_model, *args, **kwargs):
super(BigQueryHttp, self).__init__(*args, **kwargs)
self._model = http_model
@staticmethod
def factory(bigquery_model):
"""Returns a function that creates a BigQueryHttp with the given model."""
def _create_bigquery_http_request(*args, **kwargs):
captured_model = bigquery_model
return BigQueryHttp(captured_model, *args, **kwargs)
return _create_bigquery_http_request
def execute(self, **kwargs):
try:
return super(BigQueryHttp, self).execute(**kwargs)
except apiclient.errors.HttpError, e:
# TODO(user): Remove this when apiclient supports logging of error responses.
self._model._log_response(e.resp, e.content)
if e.resp.get('content-type', '').startswith('application/json'):
result = json.loads(e.content)
error = result.get('error', {}).get('errors', [{}])[0]
raise BigQueryError.create(error, result, [])
else:
raise BigQueryCommunicationError(
('Could not connect with BigQuery server.\n'
'Http response status: %s\n'
'Http response content:\n%s') % (e.resp.get('status', '(unexpected)'), e.content))
class BigQueryClient(object):
def __init__(self, use_jwt_credentials_auth=False, jwt_account_name='', jwt_key_func=None, oauth_credentails_file=None, trace=None):
"""
:param trace: A value to add to all outgoing requests
:return:
"""
super(BigQueryClient, self).__init__()
self.trace = trace
self.use_jwt_credentials_auth = use_jwt_credentials_auth
self.jwt_account_name = jwt_account_name
self.jwt_key_func = jwt_key_func
self.oauth_credentails_file = oauth_credentails_file
###### Wrapping BigQuery's API
def datasets(self):
return self.api_client.datasets()
def jobs(self):
return self.api_client.jobs()
def projects(self):
return self.api_client.projects()
def tabledata(self):
return self.api_client.tabledata()
def tables(self):
return self.api_client.tables()
def get_http_for_request(self):
if self.use_jwt_credentials_auth: # Local debugging using pem file
scope = 'https://www.googleapis.com/auth/bigquery'
from oauth2client.client import SignedJwtAssertionCredentials
credentials = SignedJwtAssertionCredentials(self.jwt_account_name, self.jwt_key_func(), scope=scope)
logging.info("Using Standard jwt authentication")
return credentials.authorize(httplib2.Http())
elif self.is_in_appengine(): # App engine
|
elif self.oauth_credentails_file: # Local oauth token
http = httplib2.Http()
storage = Storage(self.oauth_credentails_file)
credentials = storage.get()
if not credentials:
raise EnvironmentError('No credential file present')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using Standard OAuth authentication")
return http
elif self.is_in_gce_machine(): # GCE authorization
http = httplib2.Http()
credentials = gce.AppAssertionCredentials('')
http = credentials.authorize(http)
credentials.refresh(http)
logging.info("Using GCE authentication")
return http
raise BigQueryAuthorizationError()
@staticmethod
def is_in_appengine():
return 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Google App Engine/')
@staticmethod
def is_in_gce_machine():
try:
metadata_uri = 'http://metadata.google.internal'
http = httplib2.Http()
http.request(metadata_uri, method='GET')
return True
except httplib2.ServerNotFoundError:
return False
@property
def api_client(self):
bigquery_model = BigQueryModel(trace=self.trace)
bigquery_http = BigQueryHttp.factory(bigquery_model)
http = self.get_http_for_request()
return build("bigquery", "v2", http=http, model=bigquery_model, requestBuilder=bigquery_http)
###### Utility methods
# tables() methods
def create_table(self, project_id, dataset_id, table_id, fields, ignore_existing=False,
description=None, friendly_name=None, expiration=None):
logging.info('create table %s on project %s dataset %s', table_id, project_id, dataset_id)
body = {
'tableReference': {
'tableId': table_id,
'datasetId': dataset_id,
'projectId': project_id
},
'schema': {
'fields': fields
}
}
if friendly_name is not None:
body['friendlyName'] = friendly_name
if description is not None:
body['description'] = description
if expiration is not None:
body['expirationTime'] = expiration
try:
logging.info('Creating table \ndatasetId:%s \nprojectId: %s \ntable_ref:%s', dataset_id, project_id, body)
response = self.tables().insert(projectId=project_id, datasetId=dataset_id, body=body).execute()
logging.info('%s create table response %s', project_id, response)
return response
except BigQueryDuplicateError:
if not ignore_existing:
raise
# tabledata() methods
def insert_rows(self, project_id, dataset_id, table_id, insert_id_generator, rows, ignore_invalid_rows=False):
"""Streams data into BigQuery one record at a time without needing to run a load job.
:param project_id: Project ID of the destination table. (required)
:param dataset_id: Dataset ID of the destination table. (required)
:param table_id: Table ID of the destination table. (required)
:param insert_id_generator: lambda that gets a row and generates an insertId.
:param rows: The rows to insert (array or single object)
:param ignore_invalid_rows: If True, performs 2 insert passes. On the first pass, if there is an error, Google returns "invalid" for the offending rows but inserts nothing (the remaining rows are marked "stopped").
So we filter out the "invalid" rows and do a 2nd pass.
Note that a BigQueryStreamingMaximumRowSizeExceeded error is never ignored.
:return:
A response object (https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.tabledata.html#insertAll).
If ignore_invalid_rows is True and there were errors, the return value is a dict containing the response objects for the 2 insert passes performed: dict(response_pass1=..., response_pass2=..., counts=...)
"""
if isinstance(rows, dict):
rows = [rows]
if insert_id_generator is not None:
rows_json = [{'json': r, 'insertId': insert_id_generator(r)} for r in rows]
else:
rows_json = [{'json': r} for r in rows]
body = {"rows": rows_json}
try:
logging.info("Inserting %s rows to projectId=%s, datasetId=%s, tableId=%s", len(rows), project_id, dataset_id, table_id)
response = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body).execute()
if 'insertErrors' in response:
insert_errors = response['insertErrors']
insert_errors_json = json.dumps(insert_errors)
if insert_errors_json.find('Maximum allowed row size exceeded') > -1:
raise BigQueryStreamingMaximumRowSizeExceededError()
logging.error("Failed to insert rows:\n%s", insert_errors_json)
if ignore_invalid_rows:
invalid_indices = [err['index'] for err in insert_errors
if any([x['reason'] == 'invalid' for x in err['errors']])]
rows_json_pass2 = [event for idx, event in enumerate(rows_json) if idx not in invalid_indices]
body_pass2 = {"rows": rows_json_pass2}
response2 = self.api_client.tabledata().insertAll(projectId=project_id, datasetId=dataset_id, tableId=table_id, body=body_pass2).execute()
return dict(response_pass1=response, response_pass2=response2, counts=dict(invalid_rows=len(invalid_indices), successfuly_added=len(rows_json_pass2)))
logging.info("Successfully inserted %s rows", len(rows))
return response
except BigQueryError as ex:
logging.exception(ex.message)
raise
# jobs() methods
def create_insert_job(self, project_id, dataset_id, table_id, gcs_links):
job_data = {
'projectId': project_id,
'configuration': {
'load': {
'sourceFormat': 'NEWLINE_DELIMITED_JSON',
'writeDisposition': 'WRITE_APPEND',
'
|
from google.appengine.api import memcache
scope = 'https://www.googleapis.com/auth/bigquery'
credentials = AppAssertionCredentials(scope=scope)
logging.info("Using Standard appengine authentication")
return credentials.authorize(httplib2.Http(memcache))
|
conditional_block
|
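The conditional_block row above targets the App Engine branch of get_http_for_request. When testing locally it is often clearer to force a specific branch instead of relying on environment detection; a hedged sketch (make_client and both path arguments are hypothetical, account names are placeholders):

def make_client(pem_path=None, oauth_path=None):
    # Prefer an explicit service-account key for local debugging, then a stored
    # OAuth token file; otherwise let get_http_for_request detect App Engine / GCE.
    if pem_path:
        return BigQueryClient(
            use_jwt_credentials_auth=True,
            jwt_account_name='svc-account@example-project.iam.gserviceaccount.com',
            jwt_key_func=lambda: open(pem_path, 'rb').read(),
        )
    if oauth_path:
        return BigQueryClient(oauth_credentails_file=oauth_path)
    return BigQueryClient()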
command.rs
|
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Information about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads(),
snd: self.inner.mapped_writes(),
}
}
}
impl<'a, R: Resources> Deref for AccessGuard<'a, R> {
type Target = AccessInfo<R>;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'a, R: Resources> Drop for AccessGuard<'a, R> {
fn drop(&mut self) {
|
for buffer in self.inner.mapped_reads().chain(self.inner.mapped_writes()) {
unsafe {
buffer.mapping().unwrap().release_access();
}
|
random_line_split
|
|
command.rs
|
Type, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integer formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Information about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn
|
(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped_reads
|
mapped_reads
|
identifier_name
|
command.rs
|
Type, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integer formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor {
ClearColor::Uint([v, 0, 0, 0])
}
}
/// Information about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped()
|
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped
|
{
self.mapped_reads.insert(buffer.clone());
}
|
conditional_block
|
command.rs
|
Type, InstanceCount, VertexCount,
SubmissionResult, SubmissionError};
use {state, target, pso, shade, texture, handle};
/// A universal clear color supporting integer formats
/// as well as the standard floating-point.
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd)]
#[cfg_attr(feature = "serialize", derive(Serialize, Deserialize))]
pub enum ClearColor {
/// Standard floating-point vec4 color
Float([f32; 4]),
/// Integer vector to clear ivec4 targets.
Int([i32; 4]),
/// Unsigned int vector to clear uvec4 targets.
Uint([u32; 4]),
}
/// Optional instance parameters: (instance count, buffer offset)
pub type InstanceParams = (InstanceCount, VertexCount);
/// An interface of the abstract command buffer. It collects commands in an
/// efficient API-specific manner, to be ready for execution on the device.
#[allow(missing_docs)]
pub trait Buffer<R: Resources>: Send {
/// Reset the command buffer contents, retain the allocated storage
fn reset(&mut self);
/// Bind a pipeline state object
fn bind_pipeline_state(&mut self, R::PipelineStateObject);
/// Bind a complete set of vertex buffers
fn bind_vertex_buffers(&mut self, pso::VertexBufferSet<R>);
/// Bind a complete set of constant buffers
fn bind_constant_buffers(&mut self, &[pso::ConstantBufferParam<R>]);
/// Bind a global constant
fn bind_global_constant(&mut self, shade::Location, shade::UniformValue);
/// Bind a complete set of shader resource views
fn bind_resource_views(&mut self, &[pso::ResourceViewParam<R>]);
/// Bind a complete set of unordered access views
fn bind_unordered_views(&mut self, &[pso::UnorderedViewParam<R>]);
/// Bind a complete set of samplers
fn bind_samplers(&mut self, &[pso::SamplerParam<R>]);
/// Bind a complete set of pixel targets, including multiple
/// colors views and an optional depth/stencil view.
fn bind_pixel_targets(&mut self, pso::PixelTargetSet<R>);
/// Bind an index buffer
fn bind_index(&mut self, R::Buffer, IndexType);
/// Set scissor rectangle
fn set_scissor(&mut self, target::Rect);
/// Set reference values for the blending and stencil front/back
fn set_ref_values(&mut self, state::RefValues);
/// Copy part of a buffer to another
fn copy_buffer(&mut self, src: R::Buffer, dst: R::Buffer,
src_offset_bytes: usize, dst_offset_bytes: usize,
size_bytes: usize);
/// Copy part of a buffer to a texture
fn copy_buffer_to_texture(&mut self,
src: R::Buffer, src_offset_bytes: usize,
dst: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo);
/// Copy part of a texture to a buffer
fn copy_texture_to_buffer(&mut self,
src: R::Texture, texture::Kind,
Option<texture::CubeFace>, texture::RawImageInfo,
dst: R::Buffer, dst_offset_bytes: usize);
/// Update a vertex/index/uniform buffer
fn update_buffer(&mut self, R::Buffer, data: &[u8], offset: usize);
/// Update a texture
fn update_texture(&mut self, R::Texture, texture::Kind, Option<texture::CubeFace>,
data: &[u8], texture::RawImageInfo);
fn generate_mipmap(&mut self, R::ShaderResourceView);
/// Clear color target
fn clear_color(&mut self, R::RenderTargetView, ClearColor);
fn clear_depth_stencil(&mut self, R::DepthStencilView,
Option<target::Depth>, Option<target::Stencil>);
/// Draw a primitive
fn call_draw(&mut self, VertexCount, VertexCount, Option<InstanceParams>);
/// Draw a primitive with index buffer
fn call_draw_indexed(&mut self, VertexCount, VertexCount, VertexCount, Option<InstanceParams>);
}
macro_rules! impl_clear {
{ $( $ty:ty = $sub:ident[$a:expr, $b:expr, $c:expr, $d:expr], )* } => {
$(
impl From<$ty> for ClearColor {
fn from(v: $ty) -> ClearColor {
ClearColor::$sub([v[$a], v[$b], v[$c], v[$d]])
}
}
)*
}
}
impl_clear! {
[f32; 4] = Float[0, 1, 2, 3],
[f32; 3] = Float[0, 1, 2, 0],
[f32; 2] = Float[0, 1, 0, 0],
[i32; 4] = Int [0, 1, 2, 3],
[i32; 3] = Int [0, 1, 2, 0],
[i32; 2] = Int [0, 1, 0, 0],
[u32; 4] = Uint [0, 1, 2, 3],
[u32; 3] = Uint [0, 1, 2, 0],
[u32; 2] = Uint [0, 1, 0, 0],
}
impl From<f32> for ClearColor {
fn from(v: f32) -> ClearColor {
ClearColor::Float([v, 0.0, 0.0, 0.0])
}
}
impl From<i32> for ClearColor {
fn from(v: i32) -> ClearColor {
ClearColor::Int([v, 0, 0, 0])
}
}
impl From<u32> for ClearColor {
fn from(v: u32) -> ClearColor
|
}
/// Information about what is accessed by a bunch of commands.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct AccessInfo<R: Resources> {
mapped_reads: HashSet<handle::RawBuffer<R>>,
mapped_writes: HashSet<handle::RawBuffer<R>>,
}
impl<R: Resources> AccessInfo<R> {
/// Creates empty access information
pub fn new() -> Self {
AccessInfo {
mapped_reads: HashSet::new(),
mapped_writes: HashSet::new(),
}
}
/// Clear access information
pub fn clear(&mut self) {
self.mapped_reads.clear();
self.mapped_writes.clear();
}
/// Register a buffer read access
pub fn buffer_read(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_reads.insert(buffer.clone());
}
}
/// Register a buffer write access
pub fn buffer_write(&mut self, buffer: &handle::RawBuffer<R>) {
if buffer.is_mapped() {
self.mapped_writes.insert(buffer.clone());
}
}
/// Returns the mapped buffers that the GPU will read from
pub fn mapped_reads(&self) -> AccessInfoBuffers<R> {
self.mapped_reads.iter()
}
/// Returns the mapped buffers that the GPU will write to
pub fn mapped_writes(&self) -> AccessInfoBuffers<R> {
self.mapped_writes.iter()
}
/// Are there any mapped buffer reads?
pub fn has_mapped_reads(&self) -> bool {
!self.mapped_reads.is_empty()
}
/// Are there any mapped buffer writes?
pub fn has_mapped_writes(&self) -> bool {
!self.mapped_writes.is_empty()
}
/// Takes all the accesses necessary for submission
pub fn take_accesses(&self) -> SubmissionResult<AccessGuard<R>> {
for buffer in self.mapped_reads().chain(self.mapped_writes()) {
unsafe {
if !buffer.mapping().unwrap().take_access() {
return Err(SubmissionError::AccessOverlap);
}
}
}
Ok(AccessGuard { inner: self })
}
}
#[allow(missing_docs)]
pub type AccessInfoBuffers<'a, R> = hash_set::Iter<'a, handle::RawBuffer<R>>;
#[allow(missing_docs)]
#[derive(Debug)]
pub struct AccessGuard<'a, R: Resources> {
inner: &'a AccessInfo<R>,
}
#[allow(missing_docs)]
impl<'a, R: Resources> AccessGuard<'a, R> {
/// Returns the mapped buffers that the GPU will read from,
/// with exclusive access to their mapping
pub fn access_mapped_reads(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_reads()
}
}
/// Returns the mapped buffers that the GPU will write to,
/// with exclusive access to their mapping
pub fn access_mapped_writes(&mut self) -> AccessGuardBuffers<R> {
AccessGuardBuffers {
buffers: self.inner.mapped_writes()
}
}
pub fn access_mapped(&mut self) -> AccessGuardBuffersChain<R> {
AccessGuardBuffersChain {
fst: self.inner.mapped
|
{
ClearColor::Uint([v, 0, 0, 0])
}
|
identifier_body
|
xcapture.go
|
0 {
chistMu.Lock()
var cbracket hdrhistogram.Bracket
var wbracket hdrhistogram.Bracket
var rbracket hdrhistogram.Bracket
brackets := chist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
cbracket = bracket
}
brackets = whist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
wbracket = bracket
}
brackets = rhist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
rbracket = bracket
}
s := "%d frames, %d dup, started recording %s ago\n" +
"capture latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"write latency min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"render loop min/max/avg: %.2fms/%.2fms/%.2fms±%.2fms (%g %%ile: %.2fms)\n" +
"Last slowdown: %s (%d total)\n"
if !first {
s = "\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\033[1A\033[2K" +
"\r" + s
}
first = false
var dslow interface{}
if lastSlow.IsZero() {
dslow = "never"
} else {
dslow = time.Since(lastSlow).String() + " ago"
}
fmt.Fprintf(os.Stderr, s,
whist.TotalCount(), dupped, time.Since(start),
milliseconds(chist.Min()), milliseconds(chist.Max()), milliseconds(int64(chist.Mean())), milliseconds(int64(chist.StdDev())), cbracket.Quantile, milliseconds(cbracket.ValueAt),
milliseconds(whist.Min()), milliseconds(whist.Max()), milliseconds(int64(whist.Mean())), milliseconds(int64(whist.StdDev())), wbracket.Quantile, milliseconds(wbracket.ValueAt),
milliseconds(rhist.Min()), milliseconds(rhist.Max()), milliseconds(int64(rhist.Mean())), milliseconds(int64(rhist.StdDev())), rbracket.Quantile, milliseconds(rbracket.ValueAt),
dslow, slows)
chistMu.Unlock()
}
var err error
t := time.Now()
select {
case frame := <-ch:
err = vw.SendFrame(frame)
prevFrameTime = frame.Time
default:
dupped++
err = vw.SendFrame(Frame{Time: prevFrameTime.Add(d)})
prevFrameTime = prevFrameTime.Add(d)
}
whist.RecordCorrectedValue(int64(time.Since(t)), int64(d))
if err != nil {
log.Fatal("Couldn't write frame:", err)
}
dt := time.Since(ts)
if dt > d {
lastSlow = time.Now()
slows++
}
rhist.RecordCorrectedValue(int64(dt), int64(d))
}
}()
el := NewEventLoop(xu.Conn())
res := NewResizeMonitor(el, win)
var other chan CaptureEvent
captureEvents := make(chan CaptureEvent, 1)
if *cfr {
other = make(chan CaptureEvent)
go func() {
for {
other <- CaptureEvent{}
}
}()
} else {
if err := damage.Init(xu.Conn()); err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.QueryVersion(xu.Conn(), 1, 1)
dmg := NewDamageMonitor(xu.Conn(), el, win, int(*fps))
other = dmg.C
}
go func() {
for {
var ev CaptureEvent
select {
case ev = <-res.C:
captureEvents <- ev
case ev = <-other:
captureEvents <- ev
}
}
}()
for ev := range captureEvents {
t := time.Now()
if ev.Resized {
// DRY: this duplicates the pixmap setup done in main and should be factored out
xproto.FreePixmap(xu.Conn(), pix)
var err error
pix, err = xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
}
w, h, bw := win.Dimensions()
offset := buf.PageOffset(i)
w = min(w, canvas.Width)
h = min(h, canvas.Height)
ts := time.Now()
_, err := xshm.GetImage(xu.Conn(), xproto.Drawable(pix), int16(bw), int16(bw), uint16(w), uint16(h), 0xFFFFFFFF, xproto.ImageFormatZPixmap, segID, uint32(offset)).Reply()
if err != nil {
continue
}
page := buf.Page(i)
if w < canvas.Width || h < canvas.Height {
i = (i + 1) % numPages
dest := buf.Page(i)
for i := range dest {
dest[i] = 0
}
for i := 0; i < h; i++ {
copy(dest[i*canvas.Width*bytesPerPixel:], page[i*w*bytesPerPixel:(i+1)*w*bytesPerPixel])
}
page = dest
}
drawCursor(xu, win, buf, page, canvas)
chistMu.Lock()
chist.RecordValue(int64(time.Since(t)))
chistMu.Unlock()
ch <- Frame{Data: page, Time: ts}
i = (i + 1) % numPages
}
}
func drawCursor(xu *xgbutil.XUtil, win *Window, buf Buffer, page []byte, canvas Canvas) {
// TODO(dh): We don't need to fetch the cursor image every time.
// We could listen to cursor notify events, fetch the cursor if we
// haven't seen it yet, then cache the cursor.
cursor, err := xfixes.GetCursorImage(xu.Conn()).Reply()
if err != nil {
return
}
pos, err := xproto.TranslateCoordinates(xu.Conn(), xu.RootWin(), xproto.Window(win.ID), cursor.X, cursor.Y).Reply()
if err != nil {
return
}
w, h, _ := win.Dimensions()
w = min(w, canvas.Width)
h = min(h, canvas.Height)
if pos.DstY < 0 || pos.DstX < 0 || int(pos.DstY) > h || int(pos.DstX) > w {
// cursor outside of our window
return
}
for i, p := range cursor.CursorImage {
row := i/int(cursor.Width) + int(pos.DstY) - int(cursor.Yhot)
col := i%int(cursor.Width) + int(pos.DstX) - int(cursor.Xhot)
if row >= canvas.Height || col >= canvas.Width || row < 0 || col < 0 {
// cursor is partially off-screen
break
}
off := row*canvas.Width*bytesPerPixel + col*bytesPerPixel
alpha := (p >> 24) + 1
invAlpha := 256 - (p >> 24)
page[off+3] = 255
page[off+2] = byte((alpha*uint32(byte(p>>16)) + invAlpha*uint32(page[off+2])) >> 8)
page[off+1] = byte((alpha*uint32(byte(p>>8)) + invAlpha*uint32(page[off+1])) >> 8)
page[off+0] = byte((alpha*uint32(byte(p>>0)) + invAlpha*uint32(page[off+0])) >> 8)
}
}
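// Illustration of the per-channel blend used above (not called by the capture
// path): with alpha = (p>>24)+1 and invAlpha = 256-(p>>24), a fully opaque
// cursor pixel (p>>24 == 255) yields src and a fully transparent one leaves
// dst unchanged.
func blendChannel(src, dst byte, a uint32) byte {
	alpha := a + 1
	invAlpha := 256 - a
	return byte((alpha*uint32(src) + invAlpha*uint32(dst)) >> 8)
}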
func roundDuration(d, m time.Duration) time.Duration {
|
if m <= 0 {
return d
}
r := d % m
if r < 0 {
r = -r
if r+r < m {
return d + r
}
if d1 := d - m + r; d1 < d {
return d1
}
return d // overflow
}
if r+r < m {
return d - r
}
if d1 := d + m - r; d1 > d {
return d1
}
|
identifier_body
|
|
xcapture.go
|
Monitor struct {
C chan CaptureEvent
elCh chan xgb.Event
win *Window
}
func NewResizeMonitor(el *EventLoop, win *Window) *ResizeMonitor {
res := &ResizeMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
win: win,
}
el.Register(res.elCh)
go res.start()
return res
}
func (res *ResizeMonitor) start() {
for ev := range res.elCh {
if ev, ok := ev.(xproto.ConfigureNotifyEvent); ok {
w, h, bw := res.win.Dimensions()
if int(ev.Width) != w || int(ev.Height) != h || int(ev.BorderWidth) != bw {
w, h, bw = int(ev.Width), int(ev.Height), int(ev.BorderWidth)
res.win.SetDimensions(w, h, bw)
select {
case res.C <- CaptureEvent{true}:
default:
}
}
}
}
}
type DamageMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
conn *xgb.Conn
fps int
win *Window
}
func NewDamageMonitor(conn *xgb.Conn, el *EventLoop, win *Window, fps int) *DamageMonitor {
dmg := &DamageMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
conn: conn,
fps: fps,
win: win,
}
el.Register(dmg.elCh)
go dmg.startDamage()
go dmg.startCursor()
return dmg
}
func (dmg *DamageMonitor) startDamage() {
xdmg, err := damage.NewDamageId(dmg.conn)
if err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.Create(dmg.conn, xdmg, xproto.Drawable(dmg.win.ID), damage.ReportLevelRawRectangles)
for ev := range dmg.elCh {
if _, ok := ev.(damage.NotifyEvent); ok {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func (dmg *DamageMonitor) startCursor() {
var prevCursor struct{ X, Y int }
prevInWindow := true
d := time.Second / time.Duration(dmg.fps)
t := time.NewTicker(d)
for range t.C {
cursor, err := xproto.QueryPointer(dmg.conn, xproto.Window(dmg.win.ID)).Reply()
if err != nil {
log.Println("Couldn't query cursor position:", err)
continue
}
c := struct{ X, Y int }{int(cursor.WinX), int(cursor.WinY)}
if c == prevCursor {
continue
}
prevCursor = c
damaged := false
w, h, _ := dmg.win.Dimensions()
if c.X < 0 || c.Y < 0 || c.X > w || c.Y > h {
if prevInWindow {
// cursor moved out of the window, which requires a redraw
damaged = true
}
prevInWindow = false
} else {
damaged = true
}
if damaged {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func parseSize(s string) (width, height int, err error) {
err = fmt.Errorf("%q is not a valid size specification", s)
if len(s) < 3 {
return 0, 0, err
}
parts := strings.Split(s, "x")
if len(parts) != 2 {
return 0, 0, err
}
width, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid width: %s", err)
}
height, err = strconv.Atoi(parts[1])
if err != nil {
return 0, 0, fmt.Errorf("invalid height: %s", err)
}
return width, height, err
}
func main() {
fps := flag.Uint("fps", 30, "FPS")
winID := flag.Int("win", 0, "Window ID")
size := flag.String("size", "", "Canvas size in the format WxH in pixels. Defaults to the initial size of the captured window")
cfr := flag.Bool("cfr", false, "Use a constant frame rate")
_ = cfr
flag.Parse()
win := &Window{ID: *winID}
xu, err := xgbutil.NewConn()
if err != nil {
log.Fatal("Couldn't connect to X server:", err)
}
if err := composite.Init(xu.Conn()); err != nil {
log.Fatal("COMPOSITE extension is not available:", err)
}
if err := xfixes.Init(xu.Conn()); err != nil {
log.Fatal("XFIXES extension is not available:", err)
}
xfixes.QueryVersion(xu.Conn(), 1, 0)
if err := xshm.Init(xu.Conn()); err != nil {
// TODO(dh) implement a slower version that is not using SHM
log.Fatal("MIT-SHM extension is not available:", err)
}
if err := composite.RedirectWindowChecked(xu.Conn(), xproto.Window(win.ID), composite.RedirectAutomatic).Check(); err != nil {
if err, ok := err.(xproto.AccessError); ok {
log.Fatal("Can't capture window, another program seems to be capturing it already:", err)
}
log.Fatal("Can't capture window:", err)
}
pix, err := xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
segID, err := xshm.NewSegId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for SHM:", err)
}
// Register event before we query the window size for the first
// time. Otherwise we could race and miss a window resize.
err = xproto.ChangeWindowAttributesChecked(xu.Conn(), xproto.Window(win.ID),
xproto.CwEventMask, []uint32{uint32(xproto.EventMaskStructureNotify)}).Check()
if err != nil {
log.Fatal("Couldn't monitor window for size changes:", err)
}
geom, err := xproto.GetGeometry(xu.Conn(), xproto.Drawable(win.ID)).Reply()
if err != nil {
log.Fatal("Could not determine window dimensions:", err)
}
win.SetDimensions(int(geom.Width), int(geom.Height), int(geom.BorderWidth))
var canvas Canvas
if *size != "" {
width, height, err := parseSize(*size)
if err != nil {
log.Fatal(err)
}
canvas = Canvas{width, height}
} else {
canvas = Canvas{
Width: int(geom.Width),
Height: int(geom.Height),
}
}
buf, err := NewBuffer(canvas.Width*canvas.Height*bytesPerPixel, numPages)
if err != nil {
log.Fatal("Could not create shared memory:", err)
}
if err := xshm.AttachChecked(xu.Conn(), segID, uint32(buf.ShmID), false).Check(); err != nil {
log.Fatal("Could not attach shared memory to X server:", err)
}
i := 0
ch := make(chan Frame)
tags := map[string]string{
"DATE_RECORDED": time.Now().UTC().Format("2006-01-02 15:04:05.999"),
"WINDOW_ID": strconv.Itoa(win.ID),
}
vw := NewVideoWriter(canvas, int(*fps), *cfr, tags, os.Stdout)
if err := vw.Start(); err != nil {
log.Fatal("Couldn't write output:", err)
}
chistMu := &sync.Mutex{}
chist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
whist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
rhist := hdrhistogram.New(int64(1*time.Millisecond), int64(10*time.Second), 3)
var lastSlow time.Time
var slows uint64
go func() {
d := time.Second / time.Duration(*fps)
t := time.NewTicker(d)
start := time.Now()
dupped := 0
var prevFrameTime time.Time
first := true
for ts := range t.C
|
{
if rhist.TotalCount()%int64(*fps) == 0 {
chistMu.Lock()
var cbracket hdrhistogram.Bracket
var wbracket hdrhistogram.Bracket
var rbracket hdrhistogram.Bracket
brackets := chist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
cbracket = bracket
}
brackets = whist.CumulativeDistribution()
for _, bracket := range brackets {
if bracket.ValueAt > int64(d) {
break
}
wbracket = bracket
}
|
conditional_block
|
|
xcapture.go
|
size := b.PageSize
return b.Data[offset : offset+size : offset+size]
}
type BitmapInfoHeader struct {
Size uint32
Width int32
Height int32
Planes uint16
BitCount uint16
Compression [4]byte
SizeImage uint32
XPelsPerMeter int32
YPelsPerMeter int32
ClrUsed uint32
ClrImportant uint32
}
func NewBuffer(pageSize, pages int) (Buffer, error) {
size := pageSize * pages
seg, err := shm.Create(size)
if err != nil {
return Buffer{}, err
}
data, err := seg.Attach()
if err != nil {
return Buffer{}, err
}
sh := &reflect.SliceHeader{
Data: uintptr(data),
Len: size,
Cap: size,
}
b := (*(*[]byte)(unsafe.Pointer(sh)))
return Buffer{
Pages: pages,
PageSize: pageSize,
Data: b,
ShmID: seg.ID,
}, nil
}
type EventLoop struct {
conn *xgb.Conn
mu sync.RWMutex
listeners []chan xgb.Event
}
func NewEventLoop(conn *xgb.Conn) *EventLoop {
el := &EventLoop{conn: conn}
go el.start()
return el
}
func (el *EventLoop) Register(ch chan xgb.Event) {
el.mu.Lock()
defer el.mu.Unlock()
el.listeners = append(el.listeners, ch)
}
func (el *EventLoop) start() {
for {
ev, err := el.conn.WaitForEvent()
if err != nil {
continue
}
el.mu.RLock()
ls := el.listeners
el.mu.RUnlock()
for _, l := range ls {
l <- ev
}
}
}
type CaptureEvent struct {
Resized bool
}
type ResizeMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
win *Window
}
func NewResizeMonitor(el *EventLoop, win *Window) *ResizeMonitor {
res := &ResizeMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
win: win,
}
el.Register(res.elCh)
go res.start()
return res
}
func (res *ResizeMonitor) start() {
for ev := range res.elCh {
if ev, ok := ev.(xproto.ConfigureNotifyEvent); ok {
w, h, bw := res.win.Dimensions()
if int(ev.Width) != w || int(ev.Height) != h || int(ev.BorderWidth) != bw {
w, h, bw = int(ev.Width), int(ev.Height), int(ev.BorderWidth)
res.win.SetDimensions(w, h, bw)
select {
case res.C <- CaptureEvent{true}:
default:
}
}
}
}
}
type DamageMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
conn *xgb.Conn
fps int
win *Window
}
func NewDamageMonitor(conn *xgb.Conn, el *EventLoop, win *Window, fps int) *DamageMonitor {
dmg := &DamageMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
conn: conn,
fps: fps,
win: win,
}
el.Register(dmg.elCh)
go dmg.startDamage()
go dmg.startCursor()
return dmg
}
func (dmg *DamageMonitor) startDamage() {
xdmg, err := damage.NewDamageId(dmg.conn)
if err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.Create(dmg.conn, xdmg, xproto.Drawable(dmg.win.ID), damage.ReportLevelRawRectangles)
for ev := range dmg.elCh {
if _, ok := ev.(damage.NotifyEvent); ok {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func (dmg *DamageMonitor) startCursor() {
var prevCursor struct{ X, Y int }
prevInWindow := true
d := time.Second / time.Duration(dmg.fps)
t := time.NewTicker(d)
for range t.C {
cursor, err := xproto.QueryPointer(dmg.conn, xproto.Window(dmg.win.ID)).Reply()
if err != nil {
log.Println("Couldn't query cursor position:", err)
continue
}
c := struct{ X, Y int }{int(cursor.WinX), int(cursor.WinY)}
if c == prevCursor {
continue
}
prevCursor = c
damaged := false
w, h, _ := dmg.win.Dimensions()
if c.X < 0 || c.Y < 0 || c.X > w || c.Y > h {
if prevInWindow {
// cursor moved out of the window, which requires a redraw
damaged = true
}
prevInWindow = false
} else {
damaged = true
}
if damaged {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func parseSize(s string) (width, height int, err error) {
err = fmt.Errorf("%q is not a valid size specification", s)
if len(s) < 3 {
return 0, 0, err
}
parts := strings.Split(s, "x")
if len(parts) != 2 {
return 0, 0, err
}
width, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid width: %s", err)
}
height, err = strconv.Atoi(parts[1])
if err != nil {
return 0, 0, fmt.Errorf("invalid height: %s", err)
}
return width, height, err
}
func main() {
fps := flag.Uint("fps", 30, "FPS")
winID := flag.Int("win", 0, "Window ID")
size := flag.String("size", "", "Canvas size in the format WxH in pixels. Defaults to the initial size of the captured window")
cfr := flag.Bool("cfr", false, "Use a constant frame rate")
_ = cfr
flag.Parse()
win := &Window{ID: *winID}
xu, err := xgbutil.NewConn()
if err != nil {
log.Fatal("Couldn't connect to X server:", err)
}
if err := composite.Init(xu.Conn()); err != nil {
log.Fatal("COMPOSITE extension is not available:", err)
}
if err := xfixes.Init(xu.Conn()); err != nil {
log.Fatal("XFIXES extension is not available:", err)
}
xfixes.QueryVersion(xu.Conn(), 1, 0)
if err := xshm.Init(xu.Conn()); err != nil {
// TODO(dh) implement a slower version that is not using SHM
log.Fatal("MIT-SHM extension is not available:", err)
}
if err := composite.RedirectWindowChecked(xu.Conn(), xproto.Window(win.ID), composite.RedirectAutomatic).Check(); err != nil {
if err, ok := err.(xproto.AccessError); ok {
log.Fatal("Can't capture window, another program seems to be capturing it already:", err)
}
log.Fatal("Can't capture window:", err)
}
pix, err := xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
segID, err := xshm.NewSegId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for SHM:", err)
}
// Register event before we query the window size for the first
// time. Otherwise we could race and miss a window resize.
err = xproto.ChangeWindowAttributesChecked(xu.Conn(), xproto.Window(win.ID),
xproto.CwEventMask, []uint32{uint32(xproto.EventMaskStructureNotify)}).Check()
if err != nil {
log.Fatal("Couldn't monitor window for size changes:", err)
}
geom, err := xproto.GetGeometry(xu.Conn(), xproto.Drawable(win.ID)).Reply()
if err != nil {
log.Fatal("Could not determine window dimensions:", err)
}
win.SetDimensions(int(geom.Width), int(geom.Height), int(geom.BorderWidth))
var canvas Canvas
if *size != "" {
width, height, err := parseSize(*size)
if err != nil {
log.Fatal(err)
}
canvas = Canvas{width, height}
} else {
canvas = Canvas{
Width: int(geom.Width),
Height: int(geom.Height),
}
}
buf, err := NewBuffer(canvas.Width*canvas.Height*bytesPerPixel, numPages)
if err != nil {
log.Fatal("Could not create shared memory:", err)
}
if err := xshm.AttachChecked(xu.Conn(), segID, uint32(buf.ShmID), false).
|
}
func (b Buffer) Page(idx int) []byte {
offset := b.PageOffset(idx)
|
random_line_split
|
|
xcapture.go
|
(xs ...int) int {
if len(xs) == 0 {
return 0
}
m := xs[0]
for _, x := range xs[1:] {
if x < m {
m = x
}
}
return m
}
// TODO(dh): this definition of a window is specific to Linux. On
// Windows, for example, we wouldn't have an integer specifier for the
// window.
type Window struct {
ID int
mu sync.RWMutex
width int
height int
borderWidth int
}
func (w *Window) SetDimensions(width, height, border int) {
w.mu.Lock()
defer w.mu.Unlock()
w.width = width
w.height = height
w.borderWidth = border
}
func (w *Window) Dimensions() (width, height, border int) {
w.mu.RLock()
defer w.mu.RUnlock()
return w.width, w.height, w.borderWidth
}
type Canvas struct {
Width int
Height int
}
type Frame struct {
Data []byte
Time time.Time
}
type Buffer struct {
Pages int
PageSize int
Data []byte
ShmID int
}
func (b Buffer) PageOffset(idx int) int {
return b.PageSize * idx
}
func (b Buffer) Page(idx int) []byte {
offset := b.PageOffset(idx)
size := b.PageSize
return b.Data[offset : offset+size : offset+size]
}
type BitmapInfoHeader struct {
Size uint32
Width int32
Height int32
Planes uint16
BitCount uint16
Compression [4]byte
SizeImage uint32
XPelsPerMeter int32
YPelsPerMeter int32
ClrUsed uint32
ClrImportant uint32
}
func NewBuffer(pageSize, pages int) (Buffer, error) {
size := pageSize * pages
seg, err := shm.Create(size)
if err != nil {
return Buffer{}, err
}
data, err := seg.Attach()
if err != nil {
return Buffer{}, err
}
sh := &reflect.SliceHeader{
Data: uintptr(data),
Len: size,
Cap: size,
}
b := (*(*[]byte)(unsafe.Pointer(sh)))
return Buffer{
Pages: pages,
PageSize: pageSize,
Data: b,
ShmID: seg.ID,
}, nil
}
type EventLoop struct {
conn *xgb.Conn
mu sync.RWMutex
listeners []chan xgb.Event
}
func NewEventLoop(conn *xgb.Conn) *EventLoop {
el := &EventLoop{conn: conn}
go el.start()
return el
}
func (el *EventLoop) Register(ch chan xgb.Event) {
el.mu.Lock()
defer el.mu.Unlock()
el.listeners = append(el.listeners, ch)
}
func (el *EventLoop) start() {
for {
ev, err := el.conn.WaitForEvent()
if err != nil {
continue
}
el.mu.RLock()
ls := el.listeners
el.mu.RUnlock()
for _, l := range ls {
l <- ev
}
}
}
type CaptureEvent struct {
Resized bool
}
type ResizeMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
win *Window
}
func NewResizeMonitor(el *EventLoop, win *Window) *ResizeMonitor {
res := &ResizeMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
win: win,
}
el.Register(res.elCh)
go res.start()
return res
}
func (res *ResizeMonitor) start() {
for ev := range res.elCh {
if ev, ok := ev.(xproto.ConfigureNotifyEvent); ok {
w, h, bw := res.win.Dimensions()
if int(ev.Width) != w || int(ev.Height) != h || int(ev.BorderWidth) != bw {
w, h, bw = int(ev.Width), int(ev.Height), int(ev.BorderWidth)
res.win.SetDimensions(w, h, bw)
select {
case res.C <- CaptureEvent{true}:
default:
}
}
}
}
}
type DamageMonitor struct {
C chan CaptureEvent
elCh chan xgb.Event
conn *xgb.Conn
fps int
win *Window
}
func NewDamageMonitor(conn *xgb.Conn, el *EventLoop, win *Window, fps int) *DamageMonitor {
dmg := &DamageMonitor{
C: make(chan CaptureEvent, 1),
elCh: make(chan xgb.Event),
conn: conn,
fps: fps,
win: win,
}
el.Register(dmg.elCh)
go dmg.startDamage()
go dmg.startCursor()
return dmg
}
func (dmg *DamageMonitor) startDamage() {
xdmg, err := damage.NewDamageId(dmg.conn)
if err != nil {
// XXX fall back gracefully
log.Fatal(err)
}
damage.Create(dmg.conn, xdmg, xproto.Drawable(dmg.win.ID), damage.ReportLevelRawRectangles)
for ev := range dmg.elCh {
if _, ok := ev.(damage.NotifyEvent); ok {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func (dmg *DamageMonitor) startCursor() {
var prevCursor struct{ X, Y int }
prevInWindow := true
d := time.Second / time.Duration(dmg.fps)
t := time.NewTicker(d)
for range t.C {
cursor, err := xproto.QueryPointer(dmg.conn, xproto.Window(dmg.win.ID)).Reply()
if err != nil {
log.Println("Couldn't query cursor position:", err)
continue
}
c := struct{ X, Y int }{int(cursor.WinX), int(cursor.WinY)}
if c == prevCursor {
continue
}
prevCursor = c
damaged := false
w, h, _ := dmg.win.Dimensions()
if c.X < 0 || c.Y < 0 || c.X > w || c.Y > h {
if prevInWindow {
// cursor moved out of the window, which requires a redraw
damaged = true
}
prevInWindow = false
} else {
damaged = true
}
if damaged {
select {
case dmg.C <- CaptureEvent{}:
default:
}
}
}
}
func parseSize(s string) (width, height int, err error) {
err = fmt.Errorf("%q is not a valid size specification", s)
if len(s) < 3 {
return 0, 0, err
}
parts := strings.Split(s, "x")
if len(parts) != 2 {
return 0, 0, err
}
width, err = strconv.Atoi(parts[0])
if err != nil {
return 0, 0, fmt.Errorf("invalid width: %s", err)
}
height, err = strconv.Atoi(parts[1])
if err != nil {
return 0, 0, fmt.Errorf("invalid height: %s", err)
}
return width, height, err
}
func main() {
fps := flag.Uint("fps", 30, "FPS")
winID := flag.Int("win", 0, "Window ID")
size := flag.String("size", "", "Canvas size in the format WxH in pixels. Defaults to the initial size of the captured window")
cfr := flag.Bool("cfr", false, "Use a constant frame rate")
_ = cfr
flag.Parse()
win := &Window{ID: *winID}
xu, err := xgbutil.NewConn()
if err != nil {
log.Fatal("Couldn't connect to X server:", err)
}
if err := composite.Init(xu.Conn()); err != nil {
log.Fatal("COMPOSITE extension is not available:", err)
}
if err := xfixes.Init(xu.Conn()); err != nil {
log.Fatal("XFIXES extension is not available:", err)
}
xfixes.QueryVersion(xu.Conn(), 1, 0)
if err := xshm.Init(xu.Conn()); err != nil {
// TODO(dh) implement a slower version that is not using SHM
log.Fatal("MIT-SHM extension is not available:", err)
}
if err := composite.RedirectWindowChecked(xu.Conn(), xproto.Window(win.ID), composite.RedirectAutomatic).Check(); err != nil {
if err, ok := err.(xproto.AccessError); ok {
log.Fatal("Can't capture window, another program seems to be capturing it already:", err)
}
log.Fatal("Can't capture window:", err)
}
pix, err := xproto.NewPixmapId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for pixmap:", err)
}
composite.NameWindowPixmap(xu.Conn(), xproto.Window(win.ID), pix)
segID, err := xshm.NewSegId(xu.Conn())
if err != nil {
log.Fatal("Could not obtain ID for SHM:", err)
}
// Register event before we query the window size for the first
// time.
|
min
|
identifier_name
|
|
converter.rs
|
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicitly used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
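// Sketch of intended usage (illustrative only; the extra search path and the
// macro name below are assumptions, not taken from this crate):
fn example_options() -> ConverterOptions {
    let mut options = ConverterOptions::new();
    options.include_search_paths.push(PathBuf::from("shaders/include"));
    options.macros.insert("MY_DEFINE".to_string(), Some("1".to_string()));
    options
}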
impl Default for ConverterOptions {
fn
|
() -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
_from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> {
let path = match (include_type, PathBuf::from(name).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let mut search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path
}))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert an HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
{
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
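// Illustrative call sequence (the file name, stage and entry-point name are
// assumptions for this sketch, not taken from this crate):
fn example_convert() -> Result<ConvertedShader, Error> {
    let mut converter = Converter::new()?;
    let options = ConverterOptions::default();
    converter.convert("shaders/example.hlsl", Stage::Vertex, "vs_main", &options)
}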
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
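// Example of the flat-name mapping produced above (illustrative names): a
// uniform buffer with id 12 containing a member `color` maps "_12.color" ->
// "color"; a combined image sampler with id 7 whose Texture2D is named
// `mainTex` maps "_7" -> "mainTex".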
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.len() == 0 {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
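// For example (illustrative): array_member_names("lights", &[2]) returns
// ["lights[0]", "lights[1]"], while an empty `array_dims` slice returns just
// ["lights"].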
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf,
|
default
|
identifier_name
|
converter.rs
|
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicitly used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn default() -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
_from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> {
let path = match (include_type, PathBuf::from(name).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let mut search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path
}))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert an HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
|
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.len() == 0 {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String
|
{
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
|
identifier_body
|
converter.rs
|
use ConvertedShader;
use error::Error;
#[derive(Debug, Clone)]
pub struct ConverterOptions {
/// Additional directories to search in when resolving `#include` statements.
///
/// The path to the file being converted is always implicitly used as a search path, taking
/// priority over any paths listed here.
///
/// Next, the paths listed here are tried in order.
pub include_search_paths: Vec<PathBuf>,
/// Macros to `#define` during compilation. Use `None` to define the macro without a value.
pub macros: HashMap<String, Option<String>>,
pub target_version: GlslVersion,
}
impl Default for ConverterOptions {
fn default() -> Self {
ConverterOptions {
include_search_paths: Vec::new(),
macros: HashMap::new(),
target_version: GlslVersion::V1_00Es,
}
}
}
impl ConverterOptions {
pub fn new() -> Self {
Self::default()
}
fn resolve_include(&self,
name: &str,
include_type: shaderc::IncludeType,
_from_path: &str,
_depth: usize) -> Result<shaderc::ResolvedInclude, String> {
|
}))
.collect();
find_source_file(name, &search_paths_and_parent)?
}
_ => find_source_file(name, &self.include_search_paths)?
};
let mut content = String::new();
File::open(&path)
.and_then(|mut include_file| include_file.read_to_string(&mut content))
.map_err(|err| err.to_string())?;
Ok(shaderc::ResolvedInclude {
resolved_name: path.to_string_lossy().to_string(),
content,
})
}
}
pub struct Converter {
compiler: shaderc::Compiler,
}
impl Converter {
pub fn new() -> Result<Self, Error> {
let compiler = shaderc::Compiler::new()
.ok_or(Error::InitFailed)?;
Ok(Self {
compiler
})
}
/// Convert an HLSL file to GLSL.
///
/// # Arguments
///
/// * `source_path` - Location of HLSL source file.
/// * `stage` - Type of GLSL shader to create.
/// * `entry_point` - Name of function to use as entry point for this stage in the HLSL source.
/// * `options` - Converter configuration.
pub fn convert<P>(
&mut self,
source_path: P,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<ConvertedShader, Error>
where P: Into<PathBuf>
{
let source_path = source_path.into();
let source_filename = source_path.to_string_lossy();
let mut source = String::new();
File::open(&source_path)?.read_to_string(&mut source)?;
let spirv = self.hlsl_to_spirv(&source,
source_filename.as_ref(),
stage,
entry_point,
options)?;
let module = spirv::Module::from_words(&spirv);
let mut ast = spirv::Ast::<glsl::Target>::parse(&module)?;
spirv::Compile::set_compiler_options(&mut ast, &glsl::CompilerOptions {
version: options.target_version,
vertex: glsl::CompilerVertexOptions {
invert_y: false,
transform_clip_space: false,
},
})?;
let shader = ast.compile()?;
let uniforms = find_uniform_mappings(&ast)?;
Ok(ConvertedShader {
shader,
uniforms,
})
}
fn hlsl_to_spirv(&mut self,
source: &str,
source_filename: &str,
stage: Stage,
entry_point: &str,
options: &ConverterOptions) -> Result<Vec<u32>, Error> {
let mut opts = shaderc::CompileOptions::new().ok_or(Error::InitFailed)?;
opts.set_source_language(shaderc::SourceLanguage::HLSL);
opts.set_target_env(shaderc::TargetEnv::Vulkan, 0);
opts.set_optimization_level(shaderc::OptimizationLevel::Performance);
opts.set_generate_debug_info();
opts.set_include_callback(|name, include_type, from_path, depth| {
options.resolve_include(name, include_type, from_path, depth)
});
for (macro_name, macro_value) in options.macros.iter() {
opts.add_macro_definition(macro_name, macro_value.as_ref().map(|val| val.as_str()));
}
let kind = match stage {
Stage::Fragment => shaderc::ShaderKind::Fragment,
Stage::Vertex => shaderc::ShaderKind::Vertex,
};
let artifact = self.compiler.compile_into_spirv(
&source,
kind,
source_filename,
entry_point,
Some(&opts))?;
if artifact.get_num_warnings() > 0 {
warn!("{}", artifact.get_warning_messages());
}
Ok(artifact.as_binary().to_vec())
}
}
fn find_uniform_mappings(ast: &spirv::Ast<glsl::Target>)
-> Result<HashMap<String, String>, Error> {
let shader_resources = ast.get_shader_resources()?;
let mut mappings = HashMap::new();
/* discover property indices from debug names in the uniform buffers */
for uniform_buffer in shader_resources.uniform_buffers {
for member_name in get_member_names_deep(&ast, uniform_buffer.base_type_id)? {
let flat_name = format!("_{}.{}", uniform_buffer.id, member_name);
mappings.insert(flat_name, member_name);
}
}
/* samplers end up in sampled_images, separate_images and separate_samplers - final IDs
are from sampled_images (the combined sampler resource), and names are from separate_images
(the Texture2D) */
for (image_index, sampled_image) in shader_resources.sampled_images.into_iter().enumerate() {
let image = &shader_resources.separate_images[image_index];
let compiled_name = format!("_{}", sampled_image.id);
mappings.insert(compiled_name, image.name.to_string());
}
Ok(mappings)
}
fn get_member_names_deep(ast: &spirv::Ast<glsl::Target>,
struct_type_id: u32)
-> Result<Vec<String>, Error> {
let (member_types, _member_array_sizes) = match ast.get_type(struct_type_id)? {
spirv::Type::Struct { member_types, array } => (member_types, array),
_ => panic!("uniform buffer must be a struct"),
};
let mut names = Vec::new();
for (member_id, member_type) in member_types.into_iter().enumerate() {
let member_id = member_id as u32;
let member_base_name = ast.get_member_name(struct_type_id, member_id)?;
match ast.get_type(member_type)? {
spirv::Type::Struct { ref array, .. } => {
let element_names = array_member_names(&member_base_name, array);
let member_base_type = ast.get_base_type_id(member_type)?;
let child_names = get_member_names_deep(ast, member_base_type)?;
for element_name in element_names {
for child_name in child_names.iter() {
names.push(format!("{}.{}", element_name, child_name.clone()));
}
}
}
spirv::Type::Float { ref array } |
spirv::Type::Double { ref array } |
spirv::Type::Int { ref array } |
spirv::Type::Int64 { ref array } |
spirv::Type::UInt { ref array } |
spirv::Type::UInt64 { ref array } |
spirv::Type::Boolean { ref array } |
spirv::Type::Char { ref array } |
spirv::Type::Half { ref array } => {
names.extend(array_member_names(&member_base_name, array));
}
spirv::Type::Image { .. } |
spirv::Type::SampledImage { .. } |
spirv::Type::Sampler { .. } |
spirv::Type::AtomicCounter { .. } |
spirv::Type::Void |
spirv::Type::Unknown => {
let msg = format!("member of {} had an unsupported type", member_base_name);
return Err(Error::CompilationFailed(msg));
}
}
}
Ok(names)
}
fn array_member_names(base_name: &str, array_dims: &[u32]) -> Vec<String> {
if array_dims.len() == 0 {
return vec![base_name.to_string()];
}
let mut array_element_names = Vec::new();
for (rank, dim) in array_dims.iter().enumerate() {
let prev_elements = array_element_names.clone();
array_element_names.clear();
for element in 0..*dim {
if rank == 0 {
array_element_names.push(format!("{}[{}]", base_name, element));
} else {
for prev_element in prev_elements.iter() {
array_element_names.push(format!("{}[{}]", prev_element, element));
}
}
}
}
array_element_names
}
fn find_source_file<P>(name: &str, source_paths: &[P]) -> Result<PathBuf, String>
|
let path = match (include_type, PathBuf::from(name).parent()) {
(shaderc::IncludeType::Relative, Some(parent_path)) => {
let mut search_paths_and_parent: Vec<_> = iter::once(parent_path)
.chain(self.include_search_paths.iter().map(|path_buf_ref| {
path_buf_ref as &Path
|
random_line_split
|
controllers.js
|
');
$scope.user = {};
$rootScope.isWelcomePage = false;
$scope.cancel = function() {
$ionicViewSwitcher.nextDirection('back');
$rootScope.shouldHide = true;
$state.go('/01-welcome');
};
$scope.checkInput = function() {
// console.log('Check input fired');
var reName = /^[a-z ,.'-]+$/i;
var reEmail = /^(([^<>()[\]\\.,;:\s@"]+(\.[^<>()[\]\\.,;:\s@"]+)*)|(".+"))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}])|(([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,}))$/;
var isNameOk = reName.test($scope.user.name);
var isEmailOk = reEmail.test($scope.user.email);
if (isNameOk && isEmailOk) {
$scope.isFormValid = true;
} else {
$scope.isFormValid = false;
}
};
$scope.register = function() {
// form validation
if ($scope.isFormValid) {
// Spinner
$ionicLoading.show({
animation: 'fade-in',
showBackdrop: true,
maxWidth: 200,
showDelay: 0
});
Mandrill.sendMail($scope.user.name, $scope.user.email, function() {
// Save a user
// find a file: users_iPadID.json
Dropbox.appendUser($scope.user.name, $scope.user.email, function() {
console.log('User saved.');
$rootScope.userToSend = $scope.user.name;
$rootScope.emailToSend = $scope.user.email;
});
// Load images
Dropbox.getImages(function() {
$rootScope.gallery = $rootScope.gallery.chunk(6);
$ionicViewSwitcher.nextDirection('forward');
$state.go('/03-gallery');
});
});
} else {
Dialogs.alert('One or more of your inputs is invalid. Please try again.', 'Got it', function() {});
}
};
})
/* 03-image-selection */
.controller('GalleryCtrl', function(
$scope,
$rootScope,
$cordovaOauth,
$cordovaFile,
$cordovaPrinter,
$cordovaSocialSharing,
$cordovaFileTransfer,
$timeout,
Mandrill,
ngDialog,
Dialogs,
$ionicLoading,
$ionicSlideBoxDelegate,
Dropbox) {
// imageLoaded
console.log('```` Rendering Gallery');
// $rootScope.backgroundPosX = -80;
|
animation: 'fade-in',
showBackdrop: true,
maxWidth: 200,
showDelay: 0
});
$ionicSlideBoxDelegate.slide(0, 500);
$timeout(function() {
Dropbox.getImages(function() {
$rootScope.gallery = $rootScope.gallery.chunk(6);
$ionicSlideBoxDelegate.update();
// Update Slide
$timeout(function() {
$ionicLoading.hide();
}, 1000);
});
}, 1000);
};
$scope.openImageModal = function(imgurl) {
console.log('open image modal');
IMG_TO_SHARE = imgurl;
$scope.imgToShare = IMG_TO_SHARE;
ngDialog.open({
template: 'views/imageModal.html',
scope: $scope,
controller: function($scope, $rootScope) {
console.log('you just selected %s', IMG_TO_SHARE);
/*** TWITTER LOGIN ***/
$scope.shareViaTwitter = function() {
if (window.cordova) {
window.cookies.clear(function() {
console.log('Cookies cleared!');
});
}
$cordovaOauth.twitter(TWITTER_API_KEY, TWITTER_SECRET_KEY)
.then(function(result) {
console.log('twitter login results: ' + JSON.stringify(result, null, '\t'));
/*example result object:
{ "oauth_token": "2795506425-A7gBaNkh1cKbNUKkivnjtldMVvbJ7AXlL4BdC4I",
"oauth_token_secret": "DLIy2ux3n2U4Aq6wcoSIiyNlm7KcEiEzFpNcbGMQwOyJh",
"user_id": "2795506425", "screen_name": "momentus_io" } */
$rootScope.socialToShare = 'Twitter';
$rootScope.twitter_token = result.oauth_token;
$rootScope.twitter_secret_token = result.oauth_token_secret;
$scope.closeThisDialog();
$rootScope.goToPage('/04-share');
}, function(error) {
console.log('twitter login error: ' + JSON.stringify(error));
$rootScope.isErrorSignIn = true;
Dialogs.alert('Unable to complete the sign-in process. Please try again.', 'Got it');
});
};
/*** FACEBOOK LOGIN ***/
$scope.shareViaFacebook = function() {
if (window.cordova) {
window.cookies.clear(function() {
console.log('Cookies cleared!');
});
}
$cordovaOauth.facebook(FACEBOOK_APP_ID, ['email', 'publish_actions'])
.then(function(result) {
console.log('fb login results: ' + JSON.stringify(result, null, '\t'));
$rootScope.socialToShare = 'Facebook';
$rootScope.fb_token = result.access_token;
$scope.closeThisDialog();
$rootScope.goToPage('/04-share');
}, function(error) {
console.log('error: ' + error);
$rootScope.isErrorSignIn = true;
Dialogs.alert('Unable to complete the sign-in process. Please try again.', 'Got it');
});
};
/*** MANDRILL ***/
$scope.shareViaEmail = function() {
console.log('Share via Email');
Dialogs.confirm('Do you want to share this photo to your registered email?', ['Cancel', 'Send Email'], function() {
// cancel
console.log('CONFIRM EMAIL SEND');
$scope.closeThisDialog();
}, function() {
// send email
// goto thank you
console.log('COMPILING TPL');
// console.log(EMAIL_TPL_PHOTO);
var compiled = _.template(EMAIL_TPL_PHOTO);
EMAIL_PHOTO_COMPILED = compiled({
'source_image': IMG_TO_SHARE
});
// console.log(EMAIL_TPL_PHOTO);
Mandrill.sharePhoto($rootScope.userToSend, $rootScope.emailToSend, function() {
$rootScope.socialToShare = 'Email';
// $scope.closeThisDialog();
// $rootScope.goToPage('/05-thankyou');
Dialogs.confirm('Your photo has been sent. Would you like to share another?', ['No, I\'m Finished', 'Share Again'], function() {
// No
$scope.closeThisDialog();
$rootScope.goToPage('/05-thankyou');
}, function() {
// Yes
$scope.closeThisDialog();
});
});
});
};
/*** AIR PRINT ***/
$scope.shareViaPrint = function() {
console.log('HIT PRINTER');
var page =
'<body style="margin: 0; padding: 0;"><div style="margin: 0; padding: 0px; position: absolute; top: 0px; left: 0px; width: 100%; height: 100%; background: url(' + $rootScope.overlayImg + ') no-repeat; background-size: cover; background-position: 50%;"><center><div style="position: relative; margin-top: 170px;"><img width="80%" src="' + IMG_TO_SHARE + '"></div></center></div></body>';
cordova.plugins.printer.print(page, 'Document.html', function() {
$rootScope.socialToShare = 'Print';
// $scope.closeThisDialog();
// $rootScope.goToPage('/05-thankyou');
Dialogs.confirm('Your photo has been sent to the printer. Would you like to share another?', ['No, I\'m Finished', 'Share Again'], function() {
// No
$scope.closeThisDialog();
$rootScope.goToPage('/05-thankyou');
}, function() {
// Yes
$scope.closeThisDialog();
});
});
}; // end shareViaPrint
} // end controller
}); // end ngDialog.open
}; // end openImageModal
})
/* 04-share */
.controller('ShareCtrl', function(
$scope,
$rootScope,
$ionicViewSwitcher,
$http,
$cordovaSocialSharing,
$cordovaFile,
Dialogs,
Dropbox,
$ionicLoading,
$state) {
console.log('```` Rendering Share');
// $rootScope.backgroundPosX = -120;
$scope.imgToShare = IMG_TO_SHARE;
$scope.back = function() {
$ionicViewSwitcher.nextDirection('back');
$state.go('/03-gallery');
};
$scope.postOnTwitter = function(msgtoshare) {
console.log("hit postOnTwitter.");
$
|
$scope.refreshGallery = function() {
console.log('refreshing gallery');
$ionicLoading.show({
|
random_line_split
|
main.rs
|
100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
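// Rough sanity check of the timing above (sketch; assumes pclk1/APB1 runs at
// 50 MHz, i.e. the 100 MHz sysclk divided by an APB1 prescaler of 2):
//   tq per bit = SYNC(1) + (TS1+1) + (TS2+1) = 1 + 9 + 6 = 16
//   bit rate   = 50_000_000 / (BRP+1) / 16 = 50_000_000 / 25 / 16 = 125_000 bit/s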
// Note: This was what was tested and seemed like a 1.5mbaud rate??
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
|
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX
|
random_line_split
|
|
main.rs
|
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: the commented-out values below are what was tested earlier; they appeared to give roughly a 1.5 Mbaud rate.
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
|
{
if let (Some(dp), Some(cp)) = (
stm32::Peripherals::take(),
cortex_m::peripheral::Peripherals::take(),
) {
let gpiob = dp.GPIOB.split();
let mut led = gpiob.pb7.into_push_pull_output();
let rcc = dp.RCC.constrain();
let clocks = rcc.cfgr.sysclk(100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
| fim_type: identifier_body |
| file_name: main.rs |
100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: the commented-out values below are what was tested earlier; they appeared to give roughly a 1.5 Mbaud rate.
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit()
|
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn configure<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0,
|
{
break;
}
| fim_type: conditional_block |
| file_name: main.rs |
100.mhz()).freeze();
let mut delay = hal::delay::Delay::new(cp.SYST, clocks);
let gpiod = dp.GPIOD.split();
let bps = Bps(115200);
let mut tx = configure(dp.USART3, gpiod.pd8, gpiod.pd9, bps, clocks);
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "AHB1: ");
(clocks.hclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1: ");
(clocks.pclk1().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2: ");
(clocks.pclk2().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB1 Prescaler: ");
clocks.ppre1().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "APB2 Prescaler: ");
clocks.ppre2().numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
let mut buffer = [0u8; 20];
write_string_to_serial(&mut tx, "System Frequency: ");
(clocks.sysclk().0 / 1000000).numtoa_str(10, &mut buffer);
write_bytes_to_serial(&mut tx, &buffer);
write_string_to_serial(&mut tx, "\n");
// let clock_info = format!("AHB1: {:?}", clocks.hclk());
// let clock_info = format!("AHB1: {}", 100);
///////////////////////////////////////////////////////////////////////
// Pin Setup
//////////////////////////////////////////////////////////////////////
// Use PD0 RX, PD1 TX
setup_can_gpio(gpiod.pd0, gpiod.pd1);
let rcc = unsafe { &(*stm32::RCC::ptr()) };
// Enable the clock for the can peripheral
rcc.apb1enr.modify(|_, w| w.can1en().set_bit());
// Need to figure out if there is a safe way to grab this peripheral
let can1 = unsafe { &(*stm32::CAN1::ptr()) };
// Exit from sleep mode
can1.mcr.modify(|_, w| w.sleep().clear_bit());
// request initialization
can1.mcr.modify(|_, w| w.inrq().set_bit());
// Wait for INAK bit in MSR to be set to indicate initialization is active
loop {
if can1.msr.read().inak().bit() {
break;
}
write_string_to_serial(&mut tx, "Waiting for initialization\n");
}
unsafe {
can1.mcr.modify(|_, w| {
w.ttcm()
.clear_bit()
.abom()
.clear_bit()
.awum()
.clear_bit()
.nart()
.clear_bit()
.rflm()
.clear_bit()
.txfp()
.clear_bit()
});
}
// Enable loopback mode so we can receive what we are sending.
// Note: This will still send data out the TX pin unless silent mode is enabled.
// Sets the timing to 125kbaud
unsafe {
can1.btr.modify(|_, w| {
w.lbkm()
.enabled()
.sjw()
.bits(2)
.ts2()
.bits(5)
.ts1()
.bits(8)
.brp()
.bits(24)
});
}
// Note: the commented-out values below are what was tested earlier; they appeared to give roughly a 1.5 Mbaud rate.
// unsafe {
// can1.btr.modify(|_, w| {
// w.lbkm()
// .enabled()
// .sjw()
// .bits(0)
// .ts2()
// .bits(3)
// .ts1()
// .bits(2)
// .brp()
// .bits(1)
// });
// }
if !can1.msr.read().inak().bit() {
write_string_to_serial(&mut tx, "INAK is cleared\n");
} else {
write_string_to_serial(&mut tx, "INAK is set\n");
}
// Switch hardware into normal mode.
can1.mcr.modify(|_, w| w.inrq().clear_bit());
// Wait for INAK bit in MSR to be cleared to indicate init has completed
loop {
if !can1.msr.read().inak().bit() {
break;
}
delay.delay_ms(1000_u32);
write_string_to_serial(&mut tx, "Waiting for INAK to be cleared\n");
}
write_string_to_serial(&mut tx, "INAK cleared\n");
// Set to standard identifier
unsafe {
can1.tx[0]
.tir
.modify(|_, w| w.ide().standard().stid().bits(12));
}
unsafe {
can1.tx[0].tdtr.modify(|_, w| w.dlc().bits(8));
}
unsafe {
can1.tx[0].tdlr.write(|w| w.bits(0x04030201));
can1.tx[0].tdhr.write(|w| w.bits(0x08070605));
}
// Start transmission
can1.tx[0].tir.modify(|_, w| w.txrq().set_bit());
loop {
if can1.tx[0].tir.read().txrq().bit_is_clear() {
break;
}
}
loop {
led.set_high().unwrap();
delay.delay_ms(1000_u32);
led.set_low().unwrap();
delay.delay_ms(1000_u32);
}
}
loop {
// your code goes here
}
}
pub fn setup_can_gpio<X, Y>(rx: PD0<X>, tx: PD1<Y>) {
// CAN1 RX - PG0, TX - PG1
// CAN1 RX - PA11, TX - PA12
// CAN1 RX - PD0, TX - PD1 ---
// CAN1 RX - PB8, TX - PB9
// CAN2 RX - PB12, TX - PB13
// CAN2 RX - PG11, TX - PG12
// CAN2 RX - PB5, TX - PB6
// CAN3 RX - PA8, TX - PA15
// CAN3 RX - PB3, TX - PB4
// Use PD0 RX, PD1 TX
let _can_rx = rx.into_alternate_af9();
let _can_tx = tx.into_alternate_af9();
}
pub fn write_string_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
string: &str,
) {
write_bytes_to_serial(tx, string.as_bytes());
}
pub fn write_bytes_to_serial(
tx: &mut stm32f4xx_hal::serial::Tx<stm32f4::stm32f413::USART3>,
bytes: &[u8],
) {
for byte in bytes.iter() {
block!(tx.write(*byte)).unwrap();
}
}
pub fn
|
<X, Y>(
uart: USART3,
tx: PD8<X>,
rx: PD9<Y>,
baudrate: Bps,
clocks: Clocks,
) -> hal::serial::Tx<stm32f4::stm32f413::USART3> {
let config = Config {
baudrate,
..Config::default()
};
let tx = tx.into_alternate_af7();
let rx = rx.into_alternate_af7();
let serial = Serial::usart3(uart, (tx, rx), config, clocks).unwrap();
let (tx, _) = serial.split();
tx
}
// Can FLOW:
// CAN clocks are enabled in RCC_APB1ENR
// CAN1 RX - PG0, TX
| middle: configure |
| fim_type: identifier_name |
| file_name: cfg.py |
queen',
'prince',
'princess',
'angel',
'god',
'friend',
'ally',
'spouse',
'covenantor',
'phantom',
'dark spirit',
'bonfire',
'ember',
'fog wall',
'lever',
'contraption',
'key',
'trap',
'torch',
'door',
'treasure',
'chest',
'something',
'quite something',
'rubbish',
'filth',
'weapon',
'shield',
'projectile',
'armor',
'item',
'ring',
'ore',
'coal',
'transposing kiln',
'scroll',
'umbral ash',
'throne',
'rite',
'coffin',
'cinder',
'ash',
'moon',
'eye',
'brew',
'soup',
'message',
'bloodstain',
'illusion',
'close-ranged battle',
'ranged battle',
'eliminating one at a time',
'luring it out',
'beating to a pulp',
'ambush',
'pincer attack',
'hitting them in one swoop',
'dual-wielding',
'stealth',
'mimicry',
'fleeing',
'charging',
'jumping off',
'dashing through',
'circling around',
'trapping inside',
'rescue',
'skill',
'sorcery',
'pyromancy',
'miracles',
'pure luck',
'prudence',
'brief respite',
'play dead',
'jog',
'dash',
'rolling',
'backstepping',
'jumping',
'attacking',
'jump attack',
'dash attack',
'counter attack',
'stabbing in the back',
'guard stun & stab',
'plunging attack',
'shield breaking',
'blocking',
'parrying',
'locking-on',
'no lock-on',
'two-handing',
'gesture',
'control',
'destroy',
'boulder',
'lava',
'poison gas',
'enemy horde',
'forest',
'swamp',
'cave',
'shortcut',
'detour',
'hidden path',
'secret passage',
'dead end',
'labyrinth',
'hole',
'bright spot',
'dark spot',
'open area',
'tight spot',
'safe zone',
'danger zone',
'sniper spot',
'hiding place',
'illusory wall',
'ladder',
'lift',
'gorgeous view',
'looking away',
'overconfidence',
'slip-up',
'oversight',
'fatigue',
'bad luck',
'inattention',
'loss of stamina',
'chance encounter',
'planned encounter',
'front',
'back',
'left',
'right',
'up',
'down',
'below',
'above',
'behind',
'head',
'neck',
'stomach',
'back',
'armor',
'finger',
'leg',
'rear',
'tail',
'wings',
'anywhere',
'tongue',
'right arm',
'left arm',
'thumb',
'indexfinger',
'longfinger',
'ringfinger',
'smallfinger',
'right leg',
'left leg',
'right side',
'left side',
'pincer',
'wheel',
'core',
'mount',
'regular',
'strike',
'thrust',
'slash',
'magic',
'crystal',
'fire',
'chaos',
'lightning',
'blessing',
'dark',
'critical hits',
'bleeding',
'poison',
'toxic',
'frost',
'curse',
'equipment breakage',
'chance',
'quagmire',
'hint',
'secret',
'sleeptalk',
'happiness',
'misfortune',
'life',
'death',
'demise',
'joy',
'fury',
'agony',
'sadness',
'tears',
'loyalty',
'betrayal',
'hope',
'despair',
'fear',
'losing sanity',
'victory',
'defeat',
'sacrifice',
'light',
'dark',
'bravery',
'confidence',
'vigor',
'revenge',
'resignation',
'overwhelming',
'regret',
'pointless',
'man',
'woman',
'friendship',
'love',
'recklessness',
'composure',
'guts',
'comfort',
'silence',
'deep',
'dregs',
'good luck',
'fine work',
'i did it',
'i\'ve failed',
'here',
'not here',
'i can\'t take this',
'lonely',
'don\'t you dare',
'do it',
'look carefully',
'listen carefully',
'think carefully',
'this place again',
'now the real fight begins',
'you don\'t deserve this',
'keep moving',
'pull back',
'give it up',
'don\'t give up',
'help me',
'impossible',
'bloody expensive',
'let me out of here',
'stay calm',
'like a dream',
'seems familiar',
'are you ready',
'it\'ll happen to you too',
'praise the sun',
'may the flames guide thee',
'you\'ve come to the right place',
'bless us with blood',
'may the good blood guide your way',
'fear your blindness',
'the sky and the cosmos are one',
'let us cleanse these foul streets',
'you\'re in the know right',
'oh i can\'t wait hee hee',
'take a step forward',
'turn back',
'those with faith will be spared',
'don\'t be fooled',
'pitiful really',
'behind you',
'don\'t you dare look at me',
'sincerest thanks',
'a hunter is never alone',
'please carry on in my stead',
'run',
'man-beast',
'giant beast',
'abhorrent beast',
'infected one',
'foe',
'strong foe',
'giant foe',
'terrible foe',
'hound',
'bird',
'snake',
'animal',
'insect',
'watcher',
'shaman',
'dead',
'foul spirit',
'the lost',
'malformed thing',
'unknown thing',
'slimy thing',
'blobby thing',
'kin of the cosmos',
'evil eye',
'false god',
'superior being',
'messenger',
'doll',
'elderly',
'ailing one',
'madman',
'keeper',
'mob',
'wheelchair',
'small gent',
'small lady',
'titan',
'amazon',
'dullard',
'scoundrel',
'child',
'darling',
'infant',
'yourself',
'hunter',
'cooperator',
'adversary',
'executioner',
'vileblood',
'hunter of hunters',
'blood-addled hunter',
'physical attack',
'blunt attack',
'thrust attack',
'blood attack',
'arcane',
'bolt',
'quick weapon',
'long weapon',
'frenzy',
'exploiting species',
'beast transformation',
'firearm',
'blunderbuss',
'rally',
'charge attack',
'visceral attack',
'quickstep',
'blood vial',
'quicksilver bullet',
'medicine',
'special medicine',
'oil',
'coarse paper',
'special item',
'\"focus on attacks\"',
'sneak attack',
'patrol',
'reinforcements caller',
'\"focus on evasion\"',
'\"focus on healing\"',
'\"close-range fight\"',
'\"long-range fight\"',
'\"hit-and-run\"',
'sniping',
'counter',
'\"attack from behind\"',
'\"open when attacking\"',
'\"strike and be struck\"',
'\"kill in order\"',
'\"kill first\"',
'charging forth',
'lure',
'ignoring',
'retreat',
'use of terrain',
'high spot',
'fall',
'alertness',
'unbreakable will',
'leaden constitution',
'blood echoes',
'insight',
'bloodstone',
'blood gem',
'rune',
'ritual material',
'paleblood',
'rating',
'dead body',
'statue',
'footing',
'yharnam',
'clinic',
'grand cathedral',
'church',
'safe place',
'old labyrinth',
'workshop',
'healing church',
'unseen village',
'hunting',
'night',
'dawn',
'blood',
'warm blood',
'scourge',
|
'nightmare',
'cosmos',
'oedon',
'communion',
'donation',
| fim_type: random_line_split |
| file_name: automaton.go |
transition with the specified source, dest, min, max.
func (r *Automaton) AddTransition(source, dest, min, max int) error {
//bounds := r.nextState / 2
r.growTransitions()
if r.curState != source {
if r.curState != -1 {
r.finishCurrentState()
}
// Move to next source:
r.curState = source
if r.states[2*r.curState] != -1 {
return fmt.Errorf("from state (%d) already had transitions added", source)
}
r.states[2*r.curState] = r.nextTransition
}
r.transitions[r.nextTransition] = dest
r.nextTransition++
r.transitions[r.nextTransition] = min
r.nextTransition++
r.transitions[r.nextTransition] = max
r.nextTransition++
// Increment transition count for this state
r.states[2*r.curState+1]++
return nil
}
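// Layout note (added for clarity; not in the original source): states is a flat
// []int with two slots per state -- states[2*s] holds the absolute offset of
// state s's first transition inside the transitions slice, and states[2*s+1]
// holds its transition count. transitions packs (dest, min, max) triples, so
// transition t of state s is read as:
//
//	off := r.states[2*s] + 3*t
//	dest, min, max := r.transitions[off], r.transitions[off+1], r.transitions[off+2]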
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
func (r *Automaton) AddEpsilon(source, dest int) {
t := Transition{}
count := r.InitTransition(dest, &t)
for i := 0; i < count; i++ {
r.GetNextTransition(&t)
_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
}
if r.IsAccept(dest) {
r.SetAccept(source, true)
}
}
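// Example (illustrative values, not from the original code): if dest already has
// transitions ('a'..'z') -> 4 and ('0'..'9') -> 7, AddEpsilon(source, dest)
// copies both onto source, and if dest is an accept state it also marks source
// as accepting -- source can then "reach" dest without consuming any label.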
// Copy Copies over all states/transitions from other. The state numbers are sequentially assigned (appended).
func (r *Automaton) Copy(other *Automaton) {
// Bulk copy and then fixup the state pointers:
stateOffset := r.GetNumStates()
r.states = util.Grow(r.states, r.nextState+other.nextState)
copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
for i := 0; i < other.nextState; i += 2 {
if r.states[r.nextState+i] != -1 {
r.states[r.nextState+i] += r.nextTransition
}
}
r.nextState += other.nextState
otherNumStates := other.GetNumStates()
otherAcceptStates := other.getAcceptStates()
state := uint(0)
for {
if state < uint(otherNumStates) {
if state, ok := otherAcceptStates.NextSet(state); ok {
r.SetAccept(stateOffset+int(state), true)
state++
continue
}
}
break
}
// Bulk copy and then fixup dest for each transition:
r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
for i := 0; i < other.nextTransition; i += 3 {
r.transitions[r.nextTransition+i] += stateOffset
}
r.nextTransition += other.nextTransition
if other.deterministic == false {
r.deterministic = false
}
}
// Freezes the last state, sorting and reducing the transitions.
func (r *Automaton) finishCurrentState() {
numTransitions := r.states[2*r.curState+1]
offset := r.states[2*r.curState]
start := offset / 3
sort.Sort(&destMinMaxSorter{
from: start,
to: start + numTransitions,
Automaton: r,
})
// Reduce any "adjacent" transitions:
upto := 0
minValue := -1
maxValue := -1
dest := -1
for i := 0; i < numTransitions; i++ {
tDest := r.transitions[offset+3*i]
tMin := r.transitions[offset+3*i+1]
tMax := r.transitions[offset+3*i+2]
if dest == tDest {
if tMin <= maxValue+1 {
if tMax > maxValue {
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
minValue = tMin
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
dest = tDest
minValue = tMin
maxValue = tMax
}
}
if dest != -1 {
// Last transition
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
r.nextTransition -= (numTransitions - upto) * 3
r.states[2*r.curState+1] = upto
// Sort transitions by minValue/maxValue/dest:
sort.Sort(&minMaxDestSorter{
from: start,
to: start + upto,
Automaton: r,
})
if r.deterministic && upto > 1 {
lastMax := r.transitions[offset+2]
for i := 1; i < upto; i++ {
minValue = r.transitions[offset+3*i+1]
if minValue <= lastMax {
r.deterministic = false
break
}
lastMax = r.transitions[offset+3*i+2]
}
}
}
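// Illustration of the reduction above (example values only): after sorting by
// dest/min/max, transitions to the same destination such as (dest=3, 'a'-'c'),
// (dest=3, 'b'-'f') and (dest=3, 'g'-'h') collapse into the single transition
// (dest=3, 'a'-'h'), because each new min is <= the running max+1 and therefore
// only extends the current interval instead of starting a new one.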
// IsDeterministic Returns true if this automaton is deterministic (for every state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
return r.deterministic
}
// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to call this method yourself.
func (r *Automaton) finishState() {
if r.curState != -1 {
r.finishCurrentState()
r.curState = -1
}
}
// GetNumStates How many states this automaton has.
func (r *Automaton) GetNumStates() int {
return r.nextState / 2
}
// GetNumTransitions How many transitions this automaton has.
func (r *Automaton) GetNumTransitions() int {
return r.nextTransition / 3
}
// GetNumTransitionsWithState How many transitions this state has.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
count := r.states[2*state+1]
if count == -1 {
return 0
}
return count
}
func (r *Automaton) growStates()
|
func (r *Automaton) growTransitions() {
if r.nextTransition+3 > len(r.transitions) {
r.transitions = util.Grow(r.transitions, r.nextTransition+3)
}
}
// Sorts transitions by dest, ascending, then min label ascending, then max label ascending
type destMinMaxSorter struct {
from, to int
*Automaton
}
func (r *destMinMaxSorter) Len() int {
return r.to - r.from
}
func (r *destMinMaxSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
// First dest:
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
// Then min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax {
return false
}
return false
}
func (r *destMinMaxSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *destMinMaxSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// Sorts transitions by min label, ascending, then max label ascending, then dest ascending
type minMaxDestSorter struct {
from, to int
*Automaton
}
func (r *minMaxDestSorter) Len() int {
return r.to - r.from
}
func (r *minMaxDestSorter) Less(i, j int) bool {
iStart := 3 * i
jStart :=
|
{
if r.nextState+2 > len(r.states) {
r.states = util.Grow(r.states, r.nextState+2)
}
}
| fim_type: identifier_body |
| file_name: automaton.go |
new transition with the specified source, dest, min, max.
func (r *Automaton)
|
(source, dest, min, max int) error {
//bounds := r.nextState / 2
r.growTransitions()
if r.curState != source {
if r.curState != -1 {
r.finishCurrentState()
}
// Move to next source:
r.curState = source
if r.states[2*r.curState] != -1 {
return fmt.Errorf("from state (%d) already had transitions added", source)
}
r.states[2*r.curState] = r.nextTransition
}
r.transitions[r.nextTransition] = dest
r.nextTransition++
r.transitions[r.nextTransition] = min
r.nextTransition++
r.transitions[r.nextTransition] = max
r.nextTransition++
// Increment transition count for this state
r.states[2*r.curState+1]++
return nil
}
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
func (r *Automaton) AddEpsilon(source, dest int) {
t := Transition{}
count := r.InitTransition(dest, &t)
for i := 0; i < count; i++ {
r.GetNextTransition(&t)
_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
}
if r.IsAccept(dest) {
r.SetAccept(source, true)
}
}
// Copy Copies over all states/transitions from other. The state numbers are sequentially assigned (appended).
func (r *Automaton) Copy(other *Automaton) {
// Bulk copy and then fixup the state pointers:
stateOffset := r.GetNumStates()
r.states = util.Grow(r.states, r.nextState+other.nextState)
copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
for i := 0; i < other.nextState; i += 2 {
if r.states[r.nextState+i] != -1 {
r.states[r.nextState+i] += r.nextTransition
}
}
r.nextState += other.nextState
otherNumStates := other.GetNumStates()
otherAcceptStates := other.getAcceptStates()
state := uint(0)
for {
if state < uint(otherNumStates) {
if state, ok := otherAcceptStates.NextSet(state); ok {
r.SetAccept(stateOffset+int(state), true)
state++
continue
}
}
break
}
// Bulk copy and then fixup dest for each transition:
r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
for i := 0; i < other.nextTransition; i += 3 {
r.transitions[r.nextTransition+i] += stateOffset
}
r.nextTransition += other.nextTransition
if other.deterministic == false {
r.deterministic = false
}
}
// Freezes the last state, sorting and reducing the transitions.
func (r *Automaton) finishCurrentState() {
numTransitions := r.states[2*r.curState+1]
offset := r.states[2*r.curState]
start := offset / 3
sort.Sort(&destMinMaxSorter{
from: start,
to: start + numTransitions,
Automaton: r,
})
// Reduce any "adjacent" transitions:
upto := 0
minValue := -1
maxValue := -1
dest := -1
for i := 0; i < numTransitions; i++ {
tDest := r.transitions[offset+3*i]
tMin := r.transitions[offset+3*i+1]
tMax := r.transitions[offset+3*i+2]
if dest == tDest {
if tMin <= maxValue+1 {
if tMax > maxValue {
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
minValue = tMin
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
dest = tDest
minValue = tMin
maxValue = tMax
}
}
if dest != -1 {
// Last transition
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
r.nextTransition -= (numTransitions - upto) * 3
r.states[2*r.curState+1] = upto
// Sort transitions by minValue/maxValue/dest:
sort.Sort(&minMaxDestSorter{
from: start,
to: start + upto,
Automaton: r,
})
if r.deterministic && upto > 1 {
lastMax := r.transitions[offset+2]
for i := 1; i < upto; i++ {
minValue = r.transitions[offset+3*i+1]
if minValue <= lastMax {
r.deterministic = false
break
}
lastMax = r.transitions[offset+3*i+2]
}
}
}
// IsDeterministic Returns true if this automaton is deterministic (for every state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
return r.deterministic
}
// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to call this method yourself.
func (r *Automaton) finishState() {
if r.curState != -1 {
r.finishCurrentState()
r.curState = -1
}
}
// GetNumStates How many states this automaton has.
func (r *Automaton) GetNumStates() int {
return r.nextState / 2
}
// GetNumTransitions How many transitions this automaton has.
func (r *Automaton) GetNumTransitions() int {
return r.nextTransition / 3
}
// GetNumTransitionsWithState How many transitions this state has.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
count := r.states[2*state+1]
if count == -1 {
return 0
}
return count
}
func (r *Automaton) growStates() {
if r.nextState+2 > len(r.states) {
r.states = util.Grow(r.states, r.nextState+2)
}
}
func (r *Automaton) growTransitions() {
if r.nextTransition+3 > len(r.transitions) {
r.transitions = util.Grow(r.transitions, r.nextTransition+3)
}
}
// Sorts transitions by dest, ascending, then min label ascending, then max label ascending
type destMinMaxSorter struct {
from, to int
*Automaton
}
func (r *destMinMaxSorter) Len() int {
return r.to - r.from
}
func (r *destMinMaxSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
// First dest:
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
// Then min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax {
return false
}
return false
}
func (r *destMinMaxSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *destMinMaxSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// Sorts transitions by min label, ascending, then max label ascending, then dest ascending
type minMaxDestSorter struct {
from, to int
*Automaton
}
func (r *minMaxDestSorter) Len() int {
return r.to - r.from
}
func (r *minMaxDestSorter) Less(i, j int) bool {
iStart := 3 * i
jStart :=
| middle: AddTransition |
| fim_type: identifier_name |
| file_name: automaton.go |
transition with the specified source, dest, min, max.
func (r *Automaton) AddTransition(source, dest, min, max int) error {
//bounds := r.nextState / 2
r.growTransitions()
if r.curState != source {
if r.curState != -1 {
r.finishCurrentState()
}
// Move to next source:
r.curState = source
if r.states[2*r.curState] != -1 {
return fmt.Errorf("from state (%d) already had transitions added", source)
}
r.states[2*r.curState] = r.nextTransition
}
r.transitions[r.nextTransition] = dest
r.nextTransition++
r.transitions[r.nextTransition] = min
r.nextTransition++
r.transitions[r.nextTransition] = max
r.nextTransition++
// Increment transition count for this state
r.states[2*r.curState+1]++
return nil
}
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
func (r *Automaton) AddEpsilon(source, dest int) {
t := Transition{}
count := r.InitTransition(dest, &t)
for i := 0; i < count; i++ {
r.GetNextTransition(&t)
_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
}
if r.IsAccept(dest) {
r.SetAccept(source, true)
}
}
// Copy Copies over all states/transitions from other. The state numbers are sequentially assigned (appended).
func (r *Automaton) Copy(other *Automaton) {
// Bulk copy and then fixup the state pointers:
stateOffset := r.GetNumStates()
r.states = util.Grow(r.states, r.nextState+other.nextState)
copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
for i := 0; i < other.nextState; i += 2 {
if r.states[r.nextState+i] != -1 {
r.states[r.nextState+i] += r.nextTransition
}
}
r.nextState += other.nextState
otherNumStates := other.GetNumStates()
otherAcceptStates := other.getAcceptStates()
state := uint(0)
for {
if state < uint(otherNumStates) {
if state, ok := otherAcceptStates.NextSet(state); ok {
r.SetAccept(stateOffset+int(state), true)
state++
continue
}
}
break
}
// Bulk copy and then fixup dest for each transition:
r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
for i := 0; i < other.nextTransition; i += 3 {
r.transitions[r.nextTransition+i] += stateOffset
}
r.nextTransition += other.nextTransition
if other.deterministic == false {
r.deterministic = false
}
}
// Freezes the last state, sorting and reducing the transitions.
func (r *Automaton) finishCurrentState() {
numTransitions := r.states[2*r.curState+1]
offset := r.states[2*r.curState]
start := offset / 3
sort.Sort(&destMinMaxSorter{
from: start,
to: start + numTransitions,
Automaton: r,
})
// Reduce any "adjacent" transitions:
upto := 0
minValue := -1
maxValue := -1
dest := -1
for i := 0; i < numTransitions; i++ {
tDest := r.transitions[offset+3*i]
tMin := r.transitions[offset+3*i+1]
tMax := r.transitions[offset+3*i+2]
if dest == tDest {
if tMin <= maxValue+1 {
if tMax > maxValue {
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
minValue = tMin
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
dest = tDest
minValue = tMin
maxValue = tMax
}
}
if dest != -1 {
// Last transition
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
r.nextTransition -= (numTransitions - upto) * 3
r.states[2*r.curState+1] = upto
// Sort transitions by minValue/maxValue/dest:
sort.Sort(&minMaxDestSorter{
from: start,
to: start + upto,
Automaton: r,
})
if r.deterministic && upto > 1 {
lastMax := r.transitions[offset+2]
for i := 1; i < upto; i++ {
minValue = r.transitions[offset+3*i+1]
if minValue <= lastMax {
r.deterministic = false
break
}
lastMax = r.transitions[offset+3*i+2]
}
}
}
// IsDeterministic Returns true if this automaton is deterministic (for every state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
return r.deterministic
}
// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to call this method yourself.
func (r *Automaton) finishState() {
if r.curState != -1 {
r.finishCurrentState()
r.curState = -1
}
}
// GetNumStates How many states this automaton has.
func (r *Automaton) GetNumStates() int {
return r.nextState / 2
}
// GetNumTransitions How many transitions this automaton has.
func (r *Automaton) GetNumTransitions() int {
return r.nextTransition / 3
}
// GetNumTransitionsWithState How many transitions this state has.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
count := r.states[2*state+1]
if count == -1 {
return 0
}
return count
}
func (r *Automaton) growStates() {
if r.nextState+2 > len(r.states) {
r.states = util.Grow(r.states, r.nextState+2)
}
}
func (r *Automaton) growTransitions() {
if r.nextTransition+3 > len(r.transitions) {
r.transitions = util.Grow(r.transitions, r.nextTransition+3)
}
}
// Sorts transitions by dest, ascending, then min label ascending, then max label ascending
type destMinMaxSorter struct {
from, to int
*Automaton
}
func (r *destMinMaxSorter) Len() int {
return r.to - r.from
}
func (r *destMinMaxSorter) Less(i, j int) bool {
iStart := 3 * i
jStart := 3 * j
iDest := r.transitions[iStart]
jDest := r.transitions[jStart]
// First dest:
if iDest < jDest {
return true
} else if iDest > jDest {
return false
}
// Then min:
iMin := r.transitions[iStart+1]
jMin := r.transitions[jStart+1]
if iMin < jMin {
return true
} else if iMin > jMin {
return false
}
// Then max:
iMax := r.transitions[iStart+2]
jMax := r.transitions[jStart+2]
if iMax < jMax {
return true
} else if iMax > jMax
|
return false
}
func (r *destMinMaxSorter) Swap(i, j int) {
iStart, jStart := 3*i, 3*j
r.swapOne(iStart, jStart)
r.swapOne(iStart+1, jStart+1)
r.swapOne(iStart+2, jStart+2)
}
func (r *destMinMaxSorter) swapOne(i, j int) {
r.transitions[i], r.transitions[j] =
r.transitions[j], r.transitions[i]
}
// Sorts transitions by min label, ascending, then max label ascending, then dest ascending
type minMaxDestSorter struct {
from, to int
*Automaton
}
func (r *minMaxDestSorter) Len() int {
return r.to - r.from
}
func (r *minMaxDestSorter) Less(i, j int) bool {
iStart := 3 * i
jStart :=
|
{
return false
}
| fim_type: conditional_block |
| file_name: automaton.go |
func NewAutomatonV1(numStates, numTransitions int) *Automaton {
return &Automaton{
curState: -1,
deterministic: true,
states: make([]int, numStates*2),
isAccept: bitset.New(uint(numStates)),
transitions: make([]int, numTransitions*3),
}
}
// CreateState Create a new state.
func (r *Automaton) CreateState() int {
r.growStates()
state := r.nextState / 2
r.states[r.nextState] = -1
r.nextState += 2
return state
}
// SetAccept Set or clear this state as an accept state.
func (r *Automaton) SetAccept(state int, accept bool) {
r.isAccept.SetTo(uint(state), accept)
}
// Sugar to get all transitions for all states. This is object-heavy; it's better to iterate state by state instead.
func (r *Automaton) getSortedTransitions() [][]Transition {
numStates := r.GetNumStates()
transitions := make([][]Transition, numStates)
for s := 0; s < numStates; s++ {
numTransitions := r.GetNumTransitionsWithState(s)
transitions[s] = make([]Transition, numTransitions)
for t := 0; t < numTransitions; t++ {
transition := Transition{}
r.getTransition(s, t, &transition)
transitions[s][t] = transition
}
}
return transitions
}
// Returns accept states. If the bit is set then that state is an accept state.
func (r *Automaton) getAcceptStates() *bitset.BitSet {
return r.isAccept
}
// IsAccept Returns true if this state is an accept state.
func (r *Automaton) IsAccept(state int) bool {
return r.isAccept.Test(uint(state))
}
// AddTransitionLabel Add a new transition with min = max = label.
func (r *Automaton) AddTransitionLabel(source, dest, label int) error {
return r.AddTransition(source, dest, label, label)
}
// AddTransition Add a new transition with the specified source, dest, min, max.
func (r *Automaton) AddTransition(source, dest, min, max int) error {
//bounds := r.nextState / 2
r.growTransitions()
if r.curState != source {
if r.curState != -1 {
r.finishCurrentState()
}
// Move to next source:
r.curState = source
if r.states[2*r.curState] != -1 {
return fmt.Errorf("from state (%d) already had transitions added", source)
}
r.states[2*r.curState] = r.nextTransition
}
r.transitions[r.nextTransition] = dest
r.nextTransition++
r.transitions[r.nextTransition] = min
r.nextTransition++
r.transitions[r.nextTransition] = max
r.nextTransition++
// Increment transition count for this state
r.states[2*r.curState+1]++
return nil
}
// AddEpsilon Add a [virtual] epsilon transition between source and dest. Dest state must already have all
// transitions added because this method simply copies those same transitions over to source.
func (r *Automaton) AddEpsilon(source, dest int) {
t := Transition{}
count := r.InitTransition(dest, &t)
for i := 0; i < count; i++ {
r.GetNextTransition(&t)
_ = r.AddTransition(source, t.Dest, t.Min, t.Max)
}
if r.IsAccept(dest) {
r.SetAccept(source, true)
}
}
// Copy Copies over all states/transitions from other. The state numbers are sequentially assigned (appended).
func (r *Automaton) Copy(other *Automaton) {
// Bulk copy and then fixup the state pointers:
stateOffset := r.GetNumStates()
r.states = util.Grow(r.states, r.nextState+other.nextState)
copy(r.states[r.nextState:r.nextState+other.nextState], other.states)
for i := 0; i < other.nextState; i += 2 {
if r.states[r.nextState+i] != -1 {
r.states[r.nextState+i] += r.nextTransition
}
}
r.nextState += other.nextState
otherNumStates := other.GetNumStates()
otherAcceptStates := other.getAcceptStates()
state := uint(0)
for {
if state < uint(otherNumStates) {
if state, ok := otherAcceptStates.NextSet(state); ok {
r.SetAccept(stateOffset+int(state), true)
state++
continue
}
}
break
}
// Bulk copy and then fixup dest for each transition:
r.transitions = util.Grow(r.transitions, r.nextTransition+other.nextTransition)
copy(r.transitions[r.nextTransition:r.nextTransition+other.nextTransition], other.transitions)
for i := 0; i < other.nextTransition; i += 3 {
r.transitions[r.nextTransition+i] += stateOffset
}
r.nextTransition += other.nextTransition
if other.deterministic == false {
r.deterministic = false
}
}
// Freezes the last state, sorting and reducing the transitions.
func (r *Automaton) finishCurrentState() {
numTransitions := r.states[2*r.curState+1]
offset := r.states[2*r.curState]
start := offset / 3
sort.Sort(&destMinMaxSorter{
from: start,
to: start + numTransitions,
Automaton: r,
})
// Reduce any "adjacent" transitions:
upto := 0
minValue := -1
maxValue := -1
dest := -1
for i := 0; i < numTransitions; i++ {
tDest := r.transitions[offset+3*i]
tMin := r.transitions[offset+3*i+1]
tMax := r.transitions[offset+3*i+2]
if dest == tDest {
if tMin <= maxValue+1 {
if tMax > maxValue {
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
minValue = tMin
maxValue = tMax
}
} else {
if dest != -1 {
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
dest = tDest
minValue = tMin
maxValue = tMax
}
}
if dest != -1 {
// Last transition
r.transitions[offset+3*upto] = dest
r.transitions[offset+3*upto+1] = minValue
r.transitions[offset+3*upto+2] = maxValue
upto++
}
r.nextTransition -= (numTransitions - upto) * 3
r.states[2*r.curState+1] = upto
// Sort transitions by minValue/maxValue/dest:
sort.Sort(&minMaxDestSorter{
from: start,
to: start + upto,
Automaton: r,
})
if r.deterministic && upto > 1 {
lastMax := r.transitions[offset+2]
for i := 1; i < upto; i++ {
minValue = r.transitions[offset+3*i+1]
if minValue <= lastMax {
r.deterministic = false
break
}
lastMax = r.transitions[offset+3*i+2]
}
}
}
// IsDeterministic Returns true if this automaton is deterministic (for every state there is only one
// transition for each label).
func (r *Automaton) IsDeterministic() bool {
return r.deterministic
}
// Finishes the current state; call this once you are done adding transitions for a state.
// This is automatically called if you start adding transitions to a new source state,
// but for the last state you add you need to call this method yourself.
func (r *Automaton) finishState() {
if r.curState != -1 {
r.finishCurrentState()
r.curState = -1
}
}
// GetNumStates How many states this automaton has.
func (r *Automaton) GetNumStates() int {
return r.nextState / 2
}
// GetNumTransitions How many transitions this automaton has.
func (r *Automaton) GetNumTransitions() int {
return r.nextTransition / 3
}
// GetNumTransitionsWithState How many transitions this state has.
func (r *Automaton) GetNumTransitionsWithState(state int) int {
count := r.states[2*state+1]
if count == -1 {
return 0
}
return count
}
func (r *Automaton) growStates() {
if r.nextState+2 > len(r.states) {
r.states = util.Grow(r.states, r.nextState+2)
}
}
func (r *Automaton) growTransitions() {
if r.nextTransition+3 > len(r.transitions) {
r
| fim_type: random_line_split |
| file_name: main.rs |
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
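// Hypothetical examples of `generalize`, added for illustration only; they are not
// part of the original file. Non-keyword identifiers collapse to "и", digit-initial
// tokens to "ц", while keywords and punctuation pass through unchanged.
#[cfg(test)]
mod generalize_examples {
    use super::generalize;

    #[test]
    fn examples() {
        assert_eq!(generalize("my_var"), "и"); // ordinary identifier
        assert_eq!(generalize("42"), "ц"); // starts with a digit
        assert_eq!(generalize("fn"), "fn"); // keyword kept verbatim
        assert_eq!(generalize("+"), "+"); // punctuation kept verbatim
    }
}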
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
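// For example (hypothetical paths), in `files_to_tests` above: "foo/bar.stderr" and
// "foo/bar.rs" are merged under the single key "foo/bar.rs", while "foo/baz.stdout"
// with no matching "foo/baz.rs" in the map keeps its own key.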
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
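// A tiny worked example with made-up numbers, added for illustration only: the
// model below carries weights for features 2, 5 and 9; a test containing
// features 1, 5 and 9 therefore picks up -1.0 + 2.0 = 1.0.
#[cfg(test)]
mod decision_value_examples {
    use super::get_decision_value;

    #[test]
    fn sparse_dot_product() {
        let m = [(2u32, 0.5), (5, -1.0), (9, 2.0)]; // kept sorted by feature index
        let x = [1u32, 5, 9];
        assert!((get_decision_value(&m, &x) - 1.0).abs() < 1e-12);
    }
}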
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {
let class_idx = i32::try_from(class_idx).unwrap();
let mut weights = Vec::new();
for feature_index in 1..i32::try_from(liblinear_model.num_features()).unwrap() + 1 {
let weight = liblinear_model.feature_coefficient(feature_index, class_idx);
if weight != 0.0 {
let index = u32::try_from(feature_index).unwrap();
weights.push((index, weight));
used_features.insert(index);
}
}
classes.insert(class_name.clone(), weights);
}
// Throw away features that ended up unused from the table.
let features =
feature_map.map.into_iter().filter(|(_, index)| used_features.contains(index)).collect();
// Write the model into file.
// FIXME: Make the output model file configurable.
let model = Model { features, classes };
let model_str = serde_json::to_string(&model)?;
fs::write("model.json", model_str)?;
Ok(())
}
/// Read classifier from `model.json` and use it to classify tests.
fn classify(root: &Path) -> Result<(), Box<dyn Error>> {
// Read the model from file.
// FIXME: Make the input model file configurable.
let model_str = fs::read_to_string("model.json")?;
| middle: let mut model: Model = serde_json::from_str(&model_str)?; |
| fim_type: random_line_split |
| file_name: main.rs |
}
impl ClassifiedTest {
fn max_score(&self) -> f64 {
self.class_scores[0].1
}
}
fn is_id_start(c: char) -> bool {
// This is XID_Start OR '_' (which formally is not a XID_Start).
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
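// Illustrative behaviour of `tokenize` (hypothetical inputs, added for clarity):
// whitespace only separates, identifier/digit characters stick together, and runs
// of punctuation stick together, so
//   tokenize("let x += 1;") == ["let", "x", "+=", "1", ";"]
//   tokenize("foo.bar()")   == ["foo", ".", "bar", "()"]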
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
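// Worked examples of generalize() above: generalize("my_var") == "и" (identifier-like),
// generalize("42") == "ц" (starts with a digit), while keywords and punctuation pass
// through unchanged, e.g. generalize("fn") == "fn" and generalize("=>") == "=>".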
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
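// A small sanity check for the sparse dot product above; it relies only on the fact
// that the model rows are sorted by feature index, which is how train() emits them.
#[cfg(test)]
mod decision_value_test {
    use super::get_decision_value;

    #[test]
    fn sums_weights_of_shared_indices() {
        let weights = [(2u32, 0.5f64), (7, -1.0), (11, 2.0)];
        let test_features = [2u32, 5, 11];
        // Only indices 2 and 11 occur in both vectors: 0.5 + 2.0 = 2.5.
        assert!((get_decision_value(&weights, &test_features) - 2.5).abs() < 1e-9);
    }
}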
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
{
if let Some(index) = self.map.get(&*feature) {
Some(*index)
} else if read_only {
None
} else {
let new_index = u32::try_from(self.features.len()).unwrap();
self.features.push(feature.clone().into_owned());
self.map.insert(feature.into_owned(), new_index);
Some(new_index)
}
}

main.rs
    // We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
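// For tokens ["a", "b", "c"] the loops above contribute, besides the generalized variants,
// the unigrams "a", "b", "c", the bigrams "a b" and "b c", the skip-gram "a c" and the
// trigram "a b c"; duplicates are removed by the final sort/dedup.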
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
            prefix
        } else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
    for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {

main.rs
    // We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_start(c))
}
fn is_id_continue(c: char) -> bool {
// This is exactly XID_Continue.
// We also add fast-path for ascii idents
('a'..='z').contains(&c)
|| ('A'..='Z').contains(&c)
|| ('0'..='9').contains(&c)
|| c == '_'
|| (c > '\x7f' && unicode_xid::UnicodeXID::is_xid_continue(c))
}
/// Turn text of a test into tokens.
fn tokenize(s: &str) -> Vec<String> {
let mut state = LexerState::Start;
let mut res = Vec::new();
let mut curtok = String::new();
for c in s.chars() {
if c.is_whitespace() {
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
state = LexerState::Start;
} else if is_id_continue(c) {
match state {
LexerState::Start | LexerState::ContinueIdent => {}
LexerState::ContinuePunct => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinueIdent;
} else {
// Punct
match state {
LexerState::Start | LexerState::ContinuePunct => {}
LexerState::ContinueIdent => {
assert!(!curtok.is_empty());
res.push(mem::take(&mut curtok));
}
}
curtok.push(c);
state = LexerState::ContinuePunct;
}
}
if !curtok.is_empty() {
res.push(mem::take(&mut curtok));
}
res
}
/// Turns all identifiers and digits into a single token.
fn generalize(s: &str) -> &str {
const KEYWORDS: &[&str] = &[
"_",
"as",
"break",
"const",
"continue",
"crate",
"else",
"enum",
"extern",
"false",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"match",
"mod",
"move",
"mut",
"pub",
"ref",
"return",
"self",
"Self",
"static",
"struct",
"super",
"trait",
"true",
"type",
"unsafe",
"use",
"where",
"while",
"abstract",
"become",
"box",
"do",
"final",
"macro",
"override",
"priv",
"typeof",
"unsized",
"virtual",
"yield",
"async",
"await",
"dyn",
"try",
"auto",
"catch",
"default",
"macro_rules",
"raw",
"union",
];
let first_char = s.chars().next().unwrap();
if is_id_continue(first_char) && !KEYWORDS.contains(&s) {
if is_id_start(first_char) { "и" } else { "ц" }
} else {
s
}
}
/// Turn tokens of a test into features (in their index representation).
/// Tokens, "generalized" tokens, and their bigrams and trigrams are used as features.
fn tokens_to_features(
feature_map: &mut FeatureMap,
tokens: &[String],
read_only: bool,
) -> Vec<u32> {
let mut res = Vec::new();
let mut push = |token| {
if let Some(feat) = feature_map.intern(token, read_only) {
res.push(feat);
}
};
for token in tokens {
push(token.into());
push(generalize(token).into());
}
for [token1, token2] in tokens.array_windows() {
push(format!("{} {}", token1, token2).into());
push(format!("{} {}", generalize(token1), generalize(token2)).into());
}
for [token1, _, token3] in tokens.array_windows() {
push(format!("{} {}", token1, token3).into());
push(format!("{} {}", generalize(token1), generalize(token3)).into());
}
for [token1, token2, token3] in tokens.array_windows() {
push(format!("{} {} {}", token1, token2, token3).into());
push(
format!("{} {} {}", generalize(token1), generalize(token2), generalize(token3)).into(),
);
}
res.sort_unstable();
res.dedup();
res
}
/// Merge features from `foo.rs` and `foo.stderr` into a single feature vector
/// that corresponds to a single test case including multiple files.
fn files_to_tests(files: HashMap<String, RefCell<Vec<u32>>>) -> HashMap<String, Vec<u32>> {
let mut res = HashMap::default();
for (name, features) in &files {
let mut key = name.to_string();
let prefix = if let prefix @ Some(_) = name.strip_suffix(".nll.stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stderr") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".stdout") {
prefix
} else if let prefix @ Some(_) = name.strip_suffix(".fixed") {
prefix
} else {
None
};
if let Some(prefix) = prefix {
let normalized = prefix.to_string() + ".rs";
if files.contains_key(&normalized) {
key = normalized;
}
}
merge_features(res.entry(key).or_default(), &mut features.borrow_mut());
}
res
}
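// Example of the grouping above: "foo/bar.rs" and "foo/bar.stderr" end up merged under
// the key "foo/bar.rs", while an output file with no matching .rs source keeps its own
// file name as the key.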
fn merge_features(dst: &mut Vec<u32>, src: &mut Vec<u32>) {
dst.append(src);
dst.sort_unstable();
dst.dedup();
}
/// Dot product of weight vector from the trained linear model
/// and feature vector from a new test case that needs to be classified.
/// Both vectors are sparse.
fn get_decision_value(m: &[(u32, f64)], x: &[u32]) -> f64 {
let mut res = 0.0;
for index in x {
match m.binary_search_by_key(index, |node| node.0) {
Ok(i) => res += m[i].1,
Err(..) => {}
}
}
res
}
/// Train classifier and write it to `model.json`.
fn train(root: &Path) -> Result<(), Box<dyn Error>> {
const EXCLUDED_SUBDIRS: &[&str] =
&["auxiliary", "bad", "did_you_mean", "error-codes", "issues", "rfcs", "span"];
// Build feature vectors for already classified tests.
let mut feature_map = FeatureMap::default();
feature_map.features.push(String::new()); // feature indices must start with 1
let mut class_vectors = Vec::new();
for top_entry in fs::read_dir(root)? {
let top_entry = top_entry?;
if !top_entry.file_type()?.is_dir()
|| EXCLUDED_SUBDIRS.contains(&top_entry.file_name().to_str().unwrap())
{
continue;
}
let top_path = top_entry.path();
let class = top_path.file_name().unwrap().to_str().unwrap();
let mut files = HashMap::default();
for entry in
WalkDir::new(&top_path).into_iter().filter_entry(|e| e.file_name() != "auxiliary")
{
let entry = entry?;
if !entry.file_type().is_dir() {
let path = entry.path();
if let Ok(s) = fs::read_to_string(path) {
let file_name =
path.strip_prefix(root)?.display().to_string().replace("\\", "/");
let features = tokens_to_features(&mut feature_map, &tokenize(&s), false);
files.insert(file_name, RefCell::new(features));
}
}
}
class_vectors.push((class.to_owned(), files_to_tests(files)));
}
// Turn feature vectors into input for liblinear.
let mut labels = Vec::new();
let mut features = Vec::new();
for (class_idx, (_, vectors)) in class_vectors.iter().enumerate() {
for (_, vector) in vectors {
labels.push(class_idx as f64);
features.push(vector.iter().copied().map(|i| (i, 1.0)).collect());
}
}
let input_data =
TrainingInput::from_sparse_features(labels, features).map_err(|e| e.to_string())?;
// Train liblinear model.
let mut builder = LiblinearBuilder::new();
builder.problem().input_data(input_data);
builder.parameters().solver_type(SolverType::L1R_L2LOSS_SVC);
let liblinear_model = builder.build_model()?;
// Convert the trained model into sparse representation.
let mut classes = HashMap::default();
let mut used_features = HashSet::default();
    for (class_idx, (class_name, _)) in class_vectors.iter().enumerate() {

lib.rs
fn errno() -> c_int {
unsafe {
zmq_ffi::zmq_errno()
}
}
fn strerror(errnum: c_int) -> String {
unsafe {
let s = zmq_ffi::zmq_strerror(errnum);
ffi::CStr::from_ptr(s).to_str().unwrap().to_string()
}
}
/// Report 0MQ library version
///
/// Binding of `void zmq_version (int *major, int *minor, int *patch)`
///
/// The function will return a tuple of the major, minor and patch of the ØMQ library version.
pub fn version() -> (i32, i32, i32) {
let mut major = 0;
let mut minor = 0;
let mut patch = 0;
unsafe {
zmq_ffi::zmq_version(&mut major, &mut minor, &mut patch);
}
(major as i32, minor as i32, patch as i32)
}
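// Example use of version() above (a sketch):
//
//     let (major, minor, patch) = version();
//     println!("libzmq {}.{}.{}", major, minor, patch);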
#[derive(Clone)]
pub struct Error {
err_num: c_int,
err_str: String,
}
impl Error {
fn from_last_err() -> Error {
let err_num = errno();
let err_str = strerror(err_num);
Error {
err_num: err_num,
err_str: err_str,
}
}
pub fn get_errno(&self) -> Errno {
self.err_num as Errno
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{} (code {})", self.err_str, self.err_num)
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl std::error::Error for Error {
fn description(&self) -> &str {
&self.err_str
}
}
type ContextOption = c_int;
const IO_THREADS: ContextOption = 1; // get / set
const MAX_SOCKETS: ContextOption = 2; // get / set
const SOCKET_LIMIT: ContextOption = 3; // get /
const THREAD_PRIORITY: ContextOption = 3; // / set
const THREAD_SCHED_POLICY: ContextOption = 4;// / set
const IPV6: ContextOption = 42; // get / set
macro_rules! getctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&self) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
};
($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
    /// The function will shut down the given ØMQ context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
    /// In order to establish a message flow a socket must first be connected to at least one endpoint with Socket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
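// Minimal sketch of how the Context API above fits together (error handling elided;
// REP is one of the SocketType constants declared further down in this file):
//
//     let ctx = Context::new()?;
//     ctx.set_io_threads(1)?;
//     let responder = ctx.socket(REP)?;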
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.

lib.rs
    /// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capcity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
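    // Sketch of the constructors above: from_slice copies the bytes into a freshly
    // allocated message, while from_vec hands the Vec's buffer to libzmq without copying.
    //
    //     let copied = Message::from_slice(b"hello")?;
    //     let moved = Message::from_vec(vec![0u8; 1024])?;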
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
    /// This method is identical to get_property() with an argument of MORE.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used this while
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
None
} else {
unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
}
}
}
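// Example of reading connection metadata with get_meta() above (a sketch; it only makes
// sense for messages received from a socket, and the property names follow the ZMTP spec):
//
//     if let Some(socket_type) = msg.get_meta("Socket-Type") {
//         println!("peer socket type: {}", socket_type);
//     }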
impl Deref for Message {
type Target = [u8];
fn deref<'a>(&'a self) -> &'a [u8] {
unsafe {
let ptr = self.get_const_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts(transmute(ptr), len)
}
}
}
impl DerefMut for Message {
fn deref_mut<'a>(&'a mut self) -> &'a mut [u8] {
unsafe {
let ptr = self.get_data_ptr();
let len = self.len() as usize;
slice::from_raw_parts_mut(transmute(ptr), len)
}
}
}
impl Drop for Message {
fn drop(&mut self) {
loop {
let rc = unsafe { zmq_ffi::zmq_msg_close(&mut self.msg) };
if rc != 0 {
let e = Error::from_last_err();
if e.get_errno() == EINTR {
continue;
} else {
panic!(e);
}
} else {
break;
}
}
}
}
pub type SocketType = c_int;
pub const PAIR: SocketType = 0;
pub const PUB: SocketType = 1;
pub const SUB: SocketType = 2;
pub const REQ: SocketType = 3;
pub const REP: SocketType = 4;
pub const DEALER: SocketType = 5;
pub const ROUTER: SocketType = 6;
pub const PULL: SocketType = 7;
pub const PUSH: SocketType = 8;
pub const XPUB: SocketType = 9;
pub const XSUB: SocketType = 10;
pub const STREAM: SocketType = 11;
pub type MessageProperty = c_int;
pub const MORE: MessageProperty = 1;
pub const SRCFD: MessageProperty = 2;
pub const SHARED: MessageProperty = 3;
pub type SecurityMechanism = c_int;
pub const ZMQ_NULL: SecurityMechanism = 0;
pub const ZMQ_PLAIN: SecurityMechanism = 1;
pub const ZMQ_CURVE: SecurityMechanism = 2;
pub const ZMQ_GSSAPI: SecurityMechanism = 3;
/// Check a ZMQ capability
///
/// Binding of `int zmq_has (const char *capability);`
///
/// The function shall report whether a specified capability is available in the library.

lib.rs

    ($name: ident, $opt: expr, $map: expr, $rt: ty) => {
pub fn $name(&self) -> Result<$rt, Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_get(self.ctx_ptr, $opt as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok($map(rc))
}
}
};
}
macro_rules! setctxopt_template {
($name: ident, $opt: expr) => {
pub fn $name(&mut self, optval: i32) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_set(self.ctx_ptr, $opt as c_int, optval as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
};
}
pub struct Context {
ctx_ptr: *mut c_void,
}
impl Context {
/// Create new 0MQ context
///
/// Binding of `void *zmq_ctx_new ();`
///
/// The function creates a new ØMQ context.
/// # Thread safety
/// A ØMQ context is thread safe and may be shared among as many application threads as necessary,
/// without any additional locking required on the part of the caller.
pub fn new() -> Result<Context, Error> {
let ctx_ptr = unsafe { zmq_ffi::zmq_ctx_new() };
ret_when_null!(ctx_ptr);
Ok(Context {
ctx_ptr: ctx_ptr,
})
}
/// Destroy a 0MQ context
///
/// Binding of `int zmq_ctx_term (void *context);`
/// This function will be called automatically when context goes out of scope
fn term(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_term(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Shutdown a 0MQ context
///
/// Binding of `int zmq_ctx_shutdown (void *context);`
///
    /// The function will shut down the given ØMQ context.
/// Context shutdown will cause any blocking operations currently in progress on sockets open within context to return immediately with an error code of ETERM.
/// With the exception of Socket::Close(), any further operations on sockets open within context will fail with an error code of ETERM.
pub fn shutdown(&mut self) -> Result<(), Error> {
let rc = unsafe { zmq_ffi::zmq_ctx_shutdown(self.ctx_ptr) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
getctxopt_template!(get_io_threads, IO_THREADS);
getctxopt_template!(get_max_sockets, MAX_SOCKETS);
getctxopt_template!(get_socket_limit, SOCKET_LIMIT);
getctxopt_template!(is_ipv6_enabled, IPV6, |r| { r > 0 }, bool);
setctxopt_template!(set_io_threads, IO_THREADS);
setctxopt_template!(set_max_sockets, MAX_SOCKETS);
setctxopt_template!(set_thread_priority, THREAD_PRIORITY);
setctxopt_template!(set_thread_sched_policy, THREAD_SCHED_POLICY);
setctxopt_template!(set_ipv6, IPV6);
/// Create 0MQ socket
///
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
    /// In order to establish a message flow a socket must first be connected to at least one endpoint with Socket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
        unsafe {
            let mut msg = try!(Message::with_capcity(data.len()));
            std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
            Ok(msg)
        }
    }
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
    /// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.

lib.rs
/// Binding of `void *zmq_socket (void *context, int type);`
///
/// The type argument specifies the socket type, which determines the semantics of communication over the socket.
/// The newly created socket is initially unbound, and not associated with any endpoints.
/// In order to establish a message flow a socket must first be connected to at least one endpoint with Socket::Connect,
/// or at least one endpoint must be created for accepting incoming connections with Socket::Bind().
pub fn socket(&self, t: SocketType) -> Result<Socket, Error> {
let socket = unsafe { zmq_ffi::zmq_socket(self.ctx_ptr, t as c_int) };
ret_when_null!(socket);
Ok(Socket::from_raw(socket))
}
}
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Drop for Context {
fn drop(&mut self) {
loop {
match self.term() {
Ok(_) => { },
Err(e) => {
if e.get_errno() == EINTR {
continue;
} else {
break;
}
}
}
}
}
}
const MSG_SIZE: usize = 64;
pub struct Message {
msg: zmq_ffi::zmq_msg_t,
}
unsafe extern "C" fn zmq_free_fn(data: *mut c_void, hint: *mut c_void) {
let slice = slice::from_raw_parts_mut(data as *mut u8, hint as usize);
let _: Box<[u8]> = Box::from_raw(slice);
}
impl Message {
/// initialise empty 0MQ message.
///
/// Binding of `int zmq_msg_init (zmq_msg_t *msg);`.
///
/// The function will return a message object to represent an empty message.
/// This function is most useful when called before receiving a message.
pub fn new() -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init(&mut msg) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message of a specified size.
///
/// Binding of `int zmq_msg_init_size (zmq_msg_t *msg, size_t size);`.
///
/// The function will allocate any resources required to store a message size bytes long and
/// return a message object to represent the newly allocated message.
pub fn with_capcity(len: usize) -> Result<Message, Error> {
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe { zmq_ffi::zmq_msg_init_size(&mut msg, len as size_t) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
/// Initialise 0MQ message from a supplied std::vec::Vec<u8>.
///
/// Binding of `int zmq_msg_init_data (zmq_msg_t *msg, void *data,
/// size_t size, zmq_free_fn *ffn, void *hint);`.
///
/// The function will take ownership of the Vec and
/// return a message object to represent the content referenced by the Vec.
///
/// No copy of data will be performed.
pub fn from_vec(vec: Vec<u8>) -> Result<Message, Error> {
let len = vec.len() as size_t;
let data = vec.into_boxed_slice();
let mut msg = zmq_ffi::zmq_msg_t { unknown: [0; MSG_SIZE] };
let rc = unsafe {
zmq_ffi::zmq_msg_init_data(&mut msg, Box::into_raw(data) as *mut c_void, len,
zmq_free_fn, len as *mut _)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(Message { msg: msg })
}
}
pub fn from_slice(data: &[u8]) -> Result<Message, Error> {
unsafe {
let mut msg = try!(Message::with_capcity(data.len()));
std::ptr::copy_nonoverlapping(data.as_ptr(), msg.as_mut_ptr(), data.len());
Ok(msg)
}
}
/// Move content of a message to another message.
///
/// Binding of `int zmq_msg_move (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Move the content of the message object referenced by src to the message object referenced by dest.
/// No actual copying of message content is performed,
/// dest is simply updated to reference the new content.
/// src becomes an empty message after calling Message::msg_move().
/// The original content of dest, if any, will be released
pub fn msg_move(dest: &mut Message, src: &mut Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_move(&mut dest.msg, &mut src.msg)
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Copy content of a message to another message.
///
/// Binding of `int zmq_msg_copy (zmq_msg_t *dest, zmq_msg_t *src);`.
///
/// Copy the message object referenced by src to the message object referenced by dest.
/// The original content of dest, if any, will be released.
pub fn msg_copy(dest: &mut Message, src: &Message) -> Result<(), Error> {
let rc = unsafe {
zmq_ffi::zmq_msg_copy(&mut dest.msg, transmute(&src.msg))
};
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(())
}
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_data_ptr(&mut self) -> *mut c_void {
zmq_ffi::zmq_msg_data(&mut self.msg)
}
/// Retrieve pointer to message content.
///
/// Binding of `void *zmq_msg_data (zmq_msg_t *msg);`.
///
/// The function will return a pointer to the message content.
pub unsafe fn get_const_data_ptr(&self) -> *const c_void {
zmq_ffi::zmq_msg_data(transmute(&self.msg))
}
/// Retrieve message content size in bytes
///
/// Binding of `size_t zmq_msg_size (zmq_msg_t *msg);`
///
/// The function will return the size in bytes of the content of the message.
pub fn len(&self) -> usize {
unsafe { zmq_ffi::zmq_msg_size(transmute(&self.msg)) }
}
/// Indicate if there are more message parts to receive
///
/// Binding of `int zmq_msg_more (zmq_msg_t *message);`
///
/// The function indicates whether this is part of a multi-part message, and there are further parts to receive.
    /// This method is identical to get_property() with an argument of MORE.
pub fn has_more(&self) -> bool {
unsafe { zmq_ffi::zmq_msg_more(transmute(&self.msg)) > 0 }
}
/// Get message property
///
/// Binding of `int zmq_msg_get (zmq_msg_t *message, int property);`
///
/// The function will return the value for the property specified by the property argument.
pub fn get_property(&self, property: MessageProperty) -> Result<i32, Error> {
let rc = unsafe { zmq_ffi::zmq_msg_get(transmute(&self.msg), property as c_int) };
if rc == -1 {
Err(Error::from_last_err())
} else {
Ok(rc)
}
}
// zmq_msg_set is not used this while
// pub fn set_property(&mut self, property: c_int, optval: i32) -> Result<(), Error> { }
/// Get message metadata property
///
/// Binding of `const char *zmq_msg_gets (zmq_msg_t *message, const char *property);`
///
/// The function will return the string value for the metadata property specified by the property argument.
/// Metadata is defined on a per-connection basis during the ZeroMQ connection handshake as specified in <rfc.zeromq.org/spec:37>.
/// The following ZMTP properties can be retrieved with the function:
/// `Socket-Type`
/// `Identity`
/// `Resource`
/// Additionally, when available for the underlying transport,
/// the Peer-Address property will return the IP address of the remote endpoint as returned by getnameinfo(2).
/// Other properties may be defined based on the underlying security mechanism.
pub fn get_meta<'a>(&'a self, property: &str) -> Option<&'a str> {
let prop_cstr = ffi::CString::new(property).unwrap();
let returned_str_ptr = unsafe { zmq_ffi::zmq_msg_gets(transmute(&self.msg), transmute(prop_cstr.as_ptr())) };
if returned_str_ptr.is_null() {
            None
        } else {
            unsafe { Some(ffi::CStr::from_ptr(returned_str_ptr).to_str().unwrap()) }
        }

ui.js
this.currentframe = 0;
this.frame_x = [];
this.anim_speed = 2;
this.init = function(getLayerContext, state){
this.layercontext = getLayerContext(this.LAYER_ID);
this.state = state;
$("#name").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
$("#name>form").on({
submit: this.enter_name.bind(this)
});
$("#name>form>input").mouseup(function(event){
$(this).focus();
});
this.score = $("#score").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.multiplier = $("#planes").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<3; i++){
this.images[i] = new Image();
this.images[i].src = this.img_src[i];
this.image_divs[i] = $("<div></div>",{
"id": "plane"+i,
"class": "ui"
}).css({
"opacity": 0,
"padding": "0px 0px 0px 0px",
"display": "inline",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.select_plane.bind(this)
}).appendTo(this.multiplier);
this.plane_choices[i] = false;
}
this.difficulty = $("#difficulty").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<4; i++){
this.difficulty_divs[i] = $("<div>"+this.difficulties[i]+"</div>")
.css({
"display": "inline",
"z-index": this.LAYER_ID+1,
"padding": "10px 10px 10px 10px"
}).on({
mouseup: this.select_difficulty.bind(this)
}).appendTo(this.difficulty);
}
this.play_button = $("#play").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.play_clicked.bind(this)
});
this.instructions = $("#instructions").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.score_menu = $("#all_scores").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.show_scores.bind(this)
});
this.minimize_score_button = $("#minimize").on({
mouseup: this.hide_scores.bind(this)
});
this.loading_img = new Image();
this.loading_img.src = "media/loading.png";
this.loading_img.width = this.loading_img.height = 128;
for(var i=0; i<19; i++){
this.frame_x[i] = i * this.loading_img.width;
        }
    };
    this.update = function(){
switch(this.state){
case "starting":
$(".ui").css({
"z-index": this.LAYER_ID+1
});
break;
case "running":
break;
case "loading":
//check if images finish loading before starting game
var allLoaded = true;
for(var i=0; i<game.system.entities.length; i++){
allLoaded = allLoaded && game.system.entities[i].isLoaded;
}
if(!allLoaded) {
break;
}
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
game.system.state = this.state = "running";
break;
default:
break;
}
};
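    // The UI is a small state machine: "starting" shows the menus, play_clicked() switches
    // to "loading" while entity images load, and update() above promotes "loading" to
    // "running" once every entity reports isLoaded.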
this.draw = function(){
switch(this.state){
case "starting":
this.score.html("High Score: "+game.system.highscore)
.css({
"background-color": "",
"top": "",
"left": ""
});
this.layercontext.fillRect(0,0,game.CANVAS_W,game.CANVAS_H);
var x = this.multiplier.position().left + this.multiplier.outerWidth();
            var y = this.multiplier.position().top;
var img_width, img_x;
for(var i=0; i<this.images.length; i++){
img_width = this.multiplier.outerHeight()*this.images[i].width/this.images[i].height;
img_x = x + 30+ i*(img_width+20);
this.layercontext.drawImage(this.images[i],
img_x, y,
img_width, this.multiplier.outerHeight()
);
if(this.plane_choices[i]){
this.image_divs[i].css({
"opacity": 1,
"background-color": "rgba(222,135,229,0.5)",
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}else{
this.image_divs[i].css({
"opacity": 0,
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}
}
break;
case "running":
this.score.html(game.system.score)
.css({
"background-color": "rgba(222,135,229,0.8)",
"top": 0,
"left": "85%"
});
break;
case "loading":
this.score.css("z-index", -1);
this.layercontext.save();
this.layercontext.translate(game.CANVAS_W/2, game.CANVAS_H/2);
this.layercontext.drawImage(this.loading_img,
this.frame_x[this.currentframe/this.anim_speed], 0,
this.loading_img.width, this.loading_img.height,
-this.loading_img.width/2, -this.loading_img.height/2,
this.loading_img.width, this.loading_img.height
);
this.layercontext.restore();
this.currentframe++;
if(this.currentframe == this.frame_x.length*this.anim_speed){
this.currentframe = 0;
}
break;
default:
break;
}
};
this.resized = function(){
        this.layercontext.fillStyle = "rgba(255,255,255,0.4)";
this.layercontext.strokeStyle = "rgba(0,82,156, 0.8)";
this.layercontext.lineWidth = 5;
};
this.play_clicked = function(event){
//init players
game.system.num_players = 0;
for(var i=0; i<this.plane_choices.length; i++){
if(this.plane_choices[i]){
game.system.players[game.system.num_players++] = new game.Player(i, game.system.layermanager);
}
}
//init missiles
switch (this.selected_difficulty){
case "Easy":
game.system.num_enemies = 15;
break;
case "Medium":
game.system.num_enemies = 30;
break;
case "Hard":
game.system.num_enemies = 60;
break;
case "Insane":
game.system.num_enemies = 120;
break;
default:
break;
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.enemies[i] = new game.Enemy("missile", game.system.layermanager);
}
if(game.system.num_players!=0 && game.system.num_enemies!=0){
//add all players and missiles to entities
game.system.entities.length = 0;
for(var i=0; i<game.system.num_players; i++){
game.system.entities[game.system.entities.length] = game.system.players[i];
game.system.players[i].state = "alive";
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.entities[game.system.entities.length] = game.system.enemies[i];
game.system.enemies[i].state = "setup";
}
}
//clear ui
$(".ui").css({
"z-index": -1
});
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
this.state = "loading";
event.preventDefault();
event.stopPropagation();
return false;
};
this.select_plane = function(event){
if(event.which == this.LEFT_CLICK){
var target = event.target;
for(var i=0; i<this.image_divs.length; i++){
if(target.id == this.image_divs[i].attr

ui.js
this.currentframe = 0;
this.frame_x = [];
this.anim_speed = 2;
this.init = function(getLayerContext, state){
this.layercontext = getLayerContext(this.LAYER_ID);
this.state = state;
$("#name").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
$("#name>form").on({
submit: this.enter_name.bind(this)
});
$("#name>form>input").mouseup(function(event){
$(this).focus();
});
this.score = $("#score").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.multiplier = $("#planes").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<3; i++){
this.images[i] = new Image();
this.images[i].src = this.img_src[i];
this.image_divs[i] = $("<div></div>",{
"id": "plane"+i,
"class": "ui"
}).css({
"opacity": 0,
"padding": "0px 0px 0px 0px",
"display": "inline",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.select_plane.bind(this)
}).appendTo(this.multiplier);
this.plane_choices[i] = false;
}
this.difficulty = $("#difficulty").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
for(var i=0; i<4; i++){
this.difficulty_divs[i] = $("<div>"+this.difficulties[i]+"</div>")
.css({
"display": "inline",
"z-index": this.LAYER_ID+1,
"padding": "10px 10px 10px 10px"
}).on({
mouseup: this.select_difficulty.bind(this)
}).appendTo(this.difficulty);
}
this.play_button = $("#play").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.play_clicked.bind(this)
});
this.instructions = $("#instructions").css({
"display": "block",
"z-index": this.LAYER_ID+1
});
this.score_menu = $("#all_scores").css({
"display": "block",
"z-index": this.LAYER_ID+1
}).on({
mouseup: this.show_scores.bind(this)
});
this.minimize_score_button = $("#minimize").on({
mouseup: this.hide_scores.bind(this)
});
this.loading_img = new Image();
this.loading_img.src = "media/loading.png";
this.loading_img.width = this.loading_img.height = 128;
for(var i=0; i<19; i++){
this.frame_x[i] = i * this.loading_img.width;
}
};
this.update = function(){
switch(this.state){
case "starting":
$(".ui").css({
"z-index": this.LAYER_ID+1
});
break;
case "running":
break;
case "loading":
//check if images finish loading before starting game
var allLoaded = true;
for(var i=0; i<game.system.entities.length; i++){
allLoaded = allLoaded && game.system.entities[i].isLoaded;
}
            if(!allLoaded) {
                break;
            }
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
game.system.state = this.state = "running";
break;
default:
break;
}
};
this.draw = function(){
switch(this.state){
case "starting":
this.score.html("High Score: "+game.system.highscore)
.css({
"background-color": "",
"top": "",
"left": ""
});
this.layercontext.fillRect(0,0,game.CANVAS_W,game.CANVAS_H);
var x = this.multiplier.position().left + this.multiplier.outerWidth();
            var y = this.multiplier.position().top;
var img_width, img_x;
for(var i=0; i<this.images.length; i++){
img_width = this.multiplier.outerHeight()*this.images[i].width/this.images[i].height;
img_x = x + 30+ i*(img_width+20);
this.layercontext.drawImage(this.images[i],
img_x, y,
img_width, this.multiplier.outerHeight()
);
if(this.plane_choices[i]){
this.image_divs[i].css({
"opacity": 1,
"background-color": "rgba(222,135,229,0.5)",
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}else{
this.image_divs[i].css({
"opacity": 0,
"top": 0,
"left": img_x - this.multiplier.position().left,
"width": img_width,
"height": this.multiplier.outerHeight()
});
}
}
break;
case "running":
this.score.html(game.system.score)
.css({
"background-color": "rgba(222,135,229,0.8)",
"top": 0,
"left": "85%"
});
break;
case "loading":
this.score.css("z-index", -1);
this.layercontext.save();
this.layercontext.translate(game.CANVAS_W/2, game.CANVAS_H/2);
this.layercontext.drawImage(this.loading_img,
this.frame_x[this.currentframe/this.anim_speed], 0,
this.loading_img.width, this.loading_img.height,
-this.loading_img.width/2, -this.loading_img.height/2,
this.loading_img.width, this.loading_img.height
);
this.layercontext.restore();
this.currentframe++;
if(this.currentframe == this.frame_x.length*this.anim_speed){
this.currentframe = 0;
}
break;
default:
break;
}
};
this.resized = function(){
        this.layercontext.fillStyle = "rgba(255,255,255,0.4)";
this.layercontext.strokeStyle = "rgba(0,82,156, 0.8)";
this.layercontext.lineWidth = 5;
};
this.play_clicked = function(event){
//init players
game.system.num_players = 0;
for(var i=0; i<this.plane_choices.length; i++){
if(this.plane_choices[i]){
game.system.players[game.system.num_players++] = new game.Player(i, game.system.layermanager);
}
}
//init missiles
switch (this.selected_difficulty){
case "Easy":
game.system.num_enemies = 15;
break;
case "Medium":
game.system.num_enemies = 30;
break;
case "Hard":
game.system.num_enemies = 60;
break;
case "Insane":
game.system.num_enemies = 120;
break;
default:
break;
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.enemies[i] = new game.Enemy("missile", game.system.layermanager);
}
if(game.system.num_players!=0 && game.system.num_enemies!=0){
//add all players and missiles to entities
game.system.entities.length = 0;
for(var i=0; i<game.system.num_players; i++){
game.system.entities[game.system.entities.length] = game.system.players[i];
game.system.players[i].state = "alive";
}
for(var i=0; i<game.system.num_enemies; i++){
game.system.entities[game.system.entities.length] = game.system.enemies[i];
game.system.enemies[i].state = "setup";
}
}
//clear ui
$(".ui").css({
"z-index": -1
});
this.score.css("z-index", this.LAYER_ID+1);
game.system.layermanager.clearLayer(this.LAYER_ID);
this.state = "loading";
event.preventDefault();
event.stopPropagation();
return false;
};
this.select_plane = function(event){
if(event.which == this.LEFT_CLICK){
var target = event.target;
for(var i=0; i<this.image_divs.length; i++){
if(target.id == this.image_divs[i
|
{
break;
}
|
conditional_block
|
input.go
|
Input struct {
Block
TextFgColor Attribute
TextBgColor Attribute
IsCapturing bool
IsMultiLine bool
TextBuilder TextBuilder
SpecialChars map[string]string
ShowLineNo bool
Name string
CursorX int
CursorY int
//DebugMode bool
//debugMessage string
// internal vars
lines []string
cursorLineIndex int
cursorLinePos int
}
// NewInput returns a new, initialized Input object. The method receives the initial content for the input (if any)
// and whether it should be initialized as a multi-line input field or not
func NewInput(s string, isMultiLine bool) *Input {
textArea := &Input{
Block: *NewBlock(),
TextFgColor: ThemeAttr("par.text.fg"),
TextBgColor: ThemeAttr("par.text.bg"),
TextBuilder: NewMarkdownTxBuilder(),
IsMultiLine: isMultiLine,
ShowLineNo: false,
cursorLineIndex: 0,
cursorLinePos: 0,
}
if s != "" {
textArea.SetText(s)
}
if isMultiLine {
textArea.SpecialChars = multiLineCharMap
} else {
textArea.SpecialChars = singleLineCharMap
}
return textArea
}
// StartCapture begins catching events from the /sys/kbd stream and updates the content of the Input field. While
// capturing events, the Input field also publishes its own event stream under the /input/kbd path.
func (i *Input) StartCapture() {
i.IsCapturing = true
Handle("/sys/kbd", func(e Event) {
if i.IsCapturing {
key := e.Data.(EvtKbd).KeyStr
switch key {
case "<up>":
i.moveUp()
case "<down>":
i.moveDown()
case "<left>":
i.moveLeft()
case "<right>":
i.moveRight()
case "C-8":
i.backspace()
default:
// If it's a CTRL something we don't handle then just ignore it
if strings.HasPrefix(key, "C-") {
break
}
newString := i.getCharString(key)
i.addString(newString)
}
if i.Name == "" {
SendCustomEvt("/input/kbd", i.getInputEvt(key))
} else {
SendCustomEvt("/input/" + i.Name + "/kbd", i.getInputEvt(key))
}
Render(i)
}
})
}
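// Illustrative sketch (not part of the original file): one plausible way to wire up an
// Input widget and react to the events it re-publishes. Only identifiers already used
// in this file are referenced (NewInput, StartCapture, StopCapture, Handle, Render,
// Event); "/input/kbd" is the path StartCapture publishes on when Name is empty.
func exampleInputCapture() {
	in := NewInput("", true) // multi-line input, no initial content
	in.StartCapture()        // begin consuming /sys/kbd events
	Handle("/input/kbd", func(e Event) {
		// react to the re-published keyboard events, e.g. inspect the current text
		_ = in.Text()
	})
	Render(in)
	// when the widget loses focus:
	in.StopCapture()
}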
// StopCapture tells the Input field to stop accepting events from the /sys/kbd stream
func (i *Input) StopCapture() {
i.IsCapturing = false
}
// Text returns the text of the input field as a string
func (i *Input) Text() string {
if len(i.lines) == 0 {
return ""
}
if len(i.lines) == 1 {
return i.lines[0]
}
if i.IsMultiLine {
return strings.Join(i.lines, NEW_LINE)
} else {
// we should never get here!
return i.lines[0]
}
}
func (i *Input) SetText(text string) {
i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field. By default lines are separated by \n
func (i *Input) Lines() []string {
return i.lines
}
// Private methods for the input field
// TODO: handle delete key
func (i *Input) backspace() {
curLine := i.lines[i.cursorLineIndex]
// at the beginning of the buffer, nothing to do
if len(curLine) == 0 && i.cursorLineIndex == 0 {
return
}
// at the beginning of a line somewhere in the buffer
if i.cursorLinePos == 0 {
prevLine := i.lines[i.cursorLineIndex-1]
// join what is left of the current line onto the end of the previous line
i.lines[i.cursorLineIndex-1] = prevLine + curLine
i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
i.cursorLineIndex--
i.cursorLinePos = len(prevLine)
return
}
// I'm at the end of a line
if i.cursorLinePos == len(curLine)-1 {
i.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]
i.cursorLinePos--
return
}
// I'm in the middle of a line
i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) {
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
}
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
func (i *Input) moveUp() {
// if we are already on the first line then just move the cursor to the beginning
if i.cursorLineIndex == 0 {
i.cursorLinePos = 0
return
}
// If the previous line is at least as long, we can move to the same position in the line
prevLine := i.lines[i.cursorLineIndex-1]
if len(prevLine) >= i.cursorLinePos {
i.cursorLineIndex--
} else {
// otherwise we move the cursor to the end of the previous line
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
}
}
func (i *Input) moveDown() {
// we are already on the last line, we just need to move the position to the end of the line
if i.cursorLineIndex == len(i.lines)-1 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
return
}
// check if the cursor can move to the same position in the next line, otherwise move it to the end
nextLine := i.lines[i.cursorLineIndex+1]
if len(nextLine) >= i.cursorLinePos {
i.cursorLineIndex++
} else {
i.cursorLineIndex++
i.cursorLinePos = len(nextLine) - 1
}
}
func (i *Input) moveLeft()
|
func (i *Input) moveRight() {
// if we are at the end of the line move to the next
if i.cursorLinePos >= len(i.lines[i.cursorLineIndex]) {
origLine := i.cursorLineIndex
i.moveDown()
if origLine < len(i.lines)-1 {
i.cursorLinePos = 0
}
return
}
i.cursorLinePos++
}
|
{
// if we are at the beginning of the line move the cursor to the previous line
if i.cursorLinePos == 0 {
origLine := i.cursorLineIndex
i.moveUp()
if origLine > 0 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
}
return
}
i.cursorLinePos--
}
|
identifier_body
|
input.go
|
Input struct {
Block
TextFgColor Attribute
TextBgColor Attribute
IsCapturing bool
IsMultiLine bool
TextBuilder TextBuilder
SpecialChars map[string]string
ShowLineNo bool
Name string
CursorX int
CursorY int
//DebugMode bool
//debugMessage string
// internal vars
lines []string
cursorLineIndex int
cursorLinePos int
}
// NewInput returns a new, initialized Input object. The method receives the initial content for the input (if any)
// and whether it should be initialized as a multi-line input field or not
func NewInput(s string, isMultiLine bool) *Input {
textArea := &Input{
Block: *NewBlock(),
TextFgColor: ThemeAttr("par.text.fg"),
TextBgColor: ThemeAttr("par.text.bg"),
TextBuilder: NewMarkdownTxBuilder(),
IsMultiLine: isMultiLine,
ShowLineNo: false,
cursorLineIndex: 0,
cursorLinePos: 0,
}
if s != "" {
textArea.SetText(s)
}
if isMultiLine {
textArea.SpecialChars = multiLineCharMap
} else {
textArea.SpecialChars = singleLineCharMap
}
return textArea
}
// StartCapture begins catching events from the /sys/kbd stream and updates the content of the Input field. While
// capturing events, the Input field also publishes its own event stream under the /input/kbd path.
func (i *Input) StartCapture() {
i.IsCapturing = true
Handle("/sys/kbd", func(e Event) {
if i.IsCapturing {
key := e.Data.(EvtKbd).KeyStr
switch key {
case "<up>":
i.moveUp()
case "<down>":
i.moveDown()
case "<left>":
i.moveLeft()
case "<right>":
i.moveRight()
case "C-8":
i.backspace()
default:
// If it's a CTRL something we don't handle then just ignore it
if strings.HasPrefix(key, "C-") {
break
}
newString := i.getCharString(key)
i.addString(newString)
}
if i.Name == "" {
SendCustomEvt("/input/kbd", i.getInputEvt(key))
} else {
SendCustomEvt("/input/" + i.Name + "/kbd", i.getInputEvt(key))
}
Render(i)
}
})
}
// StopCapture tells the Input field to stop accepting events from the /sys/kbd stream
func (i *Input) StopCapture() {
i.IsCapturing = false
}
// Text returns the text of the input field as a string
func (i *Input) Text() string {
if len(i.lines) == 0 {
return ""
}
if len(i.lines) == 1 {
return i.lines[0]
}
if i.IsMultiLine {
return strings.Join(i.lines, NEW_LINE)
} else {
// we should never get here!
return i.lines[0]
}
}
func (i *Input) SetText(text string) {
i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field. By default lines are separated by \n
func (i *Input) Lines() []string {
return i.lines
}
// Private methods for the input field
// TODO: handle delete key
func (i *Input) backspace() {
curLine := i.lines[i.cursorLineIndex]
// at the beginning of the buffer, nothing to do
if len(curLine) == 0 && i.cursorLineIndex == 0 {
return
}
// at the beginning of a line somewhere in the buffer
if i.cursorLinePos == 0 {
prevLine := i.lines[i.cursorLineIndex-1]
// join what is left of the current line onto the end of the previous line
i.lines[i.cursorLineIndex-1] = prevLine + curLine
i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
i.cursorLineIndex--
i.cursorLinePos = len(prevLine)
return
}
// I'm at the end of a line
if i.cursorLinePos == len(curLine)-1 {
i.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]
i.cursorLinePos--
return
}
// I'm in the middle of a line
i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex])
|
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
func (i *Input) moveUp() {
// if we are already on the first line then just move the cursor to the beginning
if i.cursorLineIndex == 0 {
i.cursorLinePos = 0
return
}
// If the previous line is at least as long, we can move to the same position in the line
prevLine := i.lines[i.cursorLineIndex-1]
if len(prevLine) >= i.cursorLinePos {
i.cursorLineIndex--
} else {
// otherwise we move the cursor to the end of the previous line
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
}
}
func (i *Input) moveDown() {
// we are already on the last line, we just need to move the position to the end of the line
if i.cursorLineIndex == len(i.lines)-1 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
return
}
// check if the cursor can move to the same position in the next line, otherwise move it to the end
nextLine := i.lines[i.cursorLineIndex+1]
if len(nextLine) >= i.cursorLinePos {
i.cursorLineIndex++
} else {
i.cursorLineIndex++
i.cursorLinePos = len(nextLine) - 1
}
}
func (i *Input) moveLeft() {
// if we are at the beginning of the line move the cursor to the previous line
if i.cursorLinePos == 0 {
origLine := i.cursorLineIndex
i.moveUp()
if origLine > 0 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
}
return
}
i.cursorLinePos--
}
func (i *Input) moveRight() {
// if we are at the end of the line move to the next
if i.cursorLinePos >= len(i.lines[i.cursorLineIndex]) {
origLine := i.cursorLineIndex
i.moveDown()
if origLine < len(i.lines)-1 {
i.cursorLinePos = 0
}
return
}
i.cursorLinePos++
}
|
{
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
}
|
conditional_block
|
input.go
|
Text(text string) {
i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field. By default lines are separated by \n
func (i *Input) Lines() []string {
return i.lines
}
// Private methods for the input field
// TODO: handle delete key
func (i *Input) backspace() {
curLine := i.lines[i.cursorLineIndex]
// at the beginning of the buffer, nothing to do
if len(curLine) == 0 && i.cursorLineIndex == 0 {
return
}
// at the beginning of a line somewhere in the buffer
if i.cursorLinePos == 0 {
prevLine := i.lines[i.cursorLineIndex-1]
// join what is left of the current line onto the end of the previous line
i.lines[i.cursorLineIndex-1] = prevLine + curLine
i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
i.cursorLineIndex--
i.cursorLinePos = len(prevLine)
return
}
// I'm at the end of a line
if i.cursorLinePos == len(curLine)-1 {
i.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]
i.cursorLinePos--
return
}
// I'm in the middle of a line
i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) {
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
}
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
func (i *Input) moveUp() {
// if we are already on the first line then just move the cursor to the beginning
if i.cursorLineIndex == 0 {
i.cursorLinePos = 0
return
}
// If the previous line is at least as long, we can move to the same position in the line
prevLine := i.lines[i.cursorLineIndex-1]
if len(prevLine) >= i.cursorLinePos {
i.cursorLineIndex--
} else {
// otherwise we move the cursor to the end of the previous line
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
}
}
func (i *Input) moveDown() {
// we are already on the last line, we just need to move the position to the end of the line
if i.cursorLineIndex == len(i.lines)-1 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
return
}
// check if the cursor can move to the same position in the next line, otherwise move it to the end
nextLine := i.lines[i.cursorLineIndex+1]
if len(nextLine) >= i.cursorLinePos {
i.cursorLineIndex++
} else {
i.cursorLineIndex++
i.cursorLinePos = len(nextLine) - 1
}
}
func (i *Input) moveLeft() {
// if we are at the beginning of the line move the cursor to the previous line
if i.cursorLinePos == 0 {
origLine := i.cursorLineIndex
i.moveUp()
if origLine > 0 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
}
return
}
i.cursorLinePos--
}
func (i *Input) moveRight() {
// if we are at the end of the line move to the next
if i.cursorLinePos >= len(i.lines[i.cursorLineIndex]) {
origLine := i.cursorLineIndex
i.moveDown()
if origLine < len(i.lines)-1 {
i.cursorLinePos = 0
}
return
}
i.cursorLinePos++
}
// Buffer implements Bufferer interface.
func (i *Input) Buffer() Buffer {
buf := i.Block.Buffer()
// offset used to display the line numbers
textXOffset := 0
bufferLines := i.lines[:]
firstLine := 0
lastLine := i.innerArea.Dy()
if i.IsMultiLine {
if i.cursorLineIndex >= lastLine {
firstLine += i.cursorLineIndex - lastLine + 1
lastLine += i.cursorLineIndex - lastLine + 1
}
if len(i.lines) < lastLine {
bufferLines = i.lines[firstLine:]
} else {
bufferLines = i.lines[firstLine:lastLine]
}
}
if i.ShowLineNo {
// forcing space for up to 1K
if lastLine < LINE_NO_MIN_SPACE {
textXOffset = len(strconv.Itoa(LINE_NO_MIN_SPACE)) + 2
} else {
textXOffset = len(strconv.Itoa(lastLine)) + 2 // one space at the beginning and one at the end
}
}
text := strings.Join(bufferLines, NEW_LINE)
// if the last line is empty then we add a fake space to make sure line numbers are displayed
if len(bufferLines) > 0 && bufferLines[len(bufferLines)-1] == "" && i.ShowLineNo {
text += " "
}
fg, bg := i.TextFgColor, i.TextBgColor
cs := i.TextBuilder.Build(text, fg, bg)
y, x, n := 0, 0, 0
lineNoCnt := 1
for n < len(cs) {
w := cs[n].Width()
if x == 0 && i.ShowLineNo {
curLineNoString := " " + strconv.Itoa(lineNoCnt) +
strings.Join(make([]string, textXOffset-len(strconv.Itoa(lineNoCnt))-1), " ")
//i.debugMessage = "Line no: " + curLineNoString
curLineNoRunes := i.TextBuilder.Build(curLineNoString, fg, bg)
for lineNo := 0; lineNo < len(curLineNoRunes); lineNo++ {
buf.Set(i.innerArea.Min.X+x+lineNo, i.innerArea.Min.Y+y, curLineNoRunes[lineNo])
}
lineNoCnt++
}
if cs[n].Ch == '\n' {
y++
n++
x = 0 // set x = 0
continue
}
buf.Set(i.innerArea.Min.X+x+textXOffset, i.innerArea.Min.Y+y, cs[n])
n++
x += w
}
cursorXOffset := i.X + textXOffset
if i.BorderLeft {
cursorXOffset++
}
cursorYOffset := i.Y // termui.TermHeight() - i.innerArea.Dy()
if i.BorderTop {
cursorYOffset++
}
if lastLine > i.innerArea.Dy() {
cursorYOffset += i.innerArea.Dy() - 1
} else {
|
cursorYOffset += i.cursorLineIndex
}
if i.IsCapturing {
i.CursorX = i.cursorLinePos+cursorXOffset
i.CursorY = cursorYOffset
|
random_line_split
|
|
input.go
|
type Input struct {
Block
TextFgColor Attribute
TextBgColor Attribute
IsCapturing bool
IsMultiLine bool
TextBuilder TextBuilder
SpecialChars map[string]string
ShowLineNo bool
Name string
CursorX int
CursorY int
//DebugMode bool
//debugMessage string
// internal vars
lines []string
cursorLineIndex int
cursorLinePos int
}
// NewInput returns a new, initialized Input object. The method receives the initial content for the input (if any)
// and whether it should be initialized as a multi-line input field or not
func NewInput(s string, isMultiLine bool) *Input {
textArea := &Input{
Block: *NewBlock(),
TextFgColor: ThemeAttr("par.text.fg"),
TextBgColor: ThemeAttr("par.text.bg"),
TextBuilder: NewMarkdownTxBuilder(),
IsMultiLine: isMultiLine,
ShowLineNo: false,
cursorLineIndex: 0,
cursorLinePos: 0,
}
if s != "" {
textArea.SetText(s)
}
if isMultiLine {
textArea.SpecialChars = multiLineCharMap
} else {
textArea.SpecialChars = singleLineCharMap
}
return textArea
}
// StartCapture begins catching events from the /sys/kbd stream and updates the content of the Input field. While
// capturing events, the Input field also publishes its own event stream under the /input/kbd path.
func (i *Input) StartCapture() {
i.IsCapturing = true
Handle("/sys/kbd", func(e Event) {
if i.IsCapturing {
key := e.Data.(EvtKbd).KeyStr
switch key {
case "<up>":
i.moveUp()
case "<down>":
i.moveDown()
case "<left>":
i.moveLeft()
case "<right>":
i.moveRight()
case "C-8":
i.backspace()
default:
// If it's a CTRL something we don't handle then just ignore it
if strings.HasPrefix(key, "C-") {
break
}
newString := i.getCharString(key)
i.addString(newString)
}
if i.Name == "" {
SendCustomEvt("/input/kbd", i.getInputEvt(key))
} else {
SendCustomEvt("/input/" + i.Name + "/kbd", i.getInputEvt(key))
}
Render(i)
}
})
}
// StopCapture tells the Input field to stop accepting events from the /sys/kbd stream
func (i *Input) StopCapture() {
i.IsCapturing = false
}
// Text returns the text of the input field as a string
func (i *Input) Text() string {
if len(i.lines) == 0 {
return ""
}
if len(i.lines) == 1 {
return i.lines[0]
}
if i.IsMultiLine {
return strings.Join(i.lines, NEW_LINE)
} else {
// we should never get here!
return i.lines[0]
}
}
func (i *Input)
|
(text string) {
i.lines = strings.Split(text, NEW_LINE)
}
// Lines returns the slice of strings with the content of the input field. By default lines are separated by \n
func (i *Input) Lines() []string {
return i.lines
}
// Private methods for the input field
// TODO: handle delete key
func (i *Input) backspace() {
curLine := i.lines[i.cursorLineIndex]
// at the beginning of the buffer, nothing to do
if len(curLine) == 0 && i.cursorLineIndex == 0 {
return
}
// at the beginning of a line somewhere in the buffer
if i.cursorLinePos == 0 {
prevLine := i.lines[i.cursorLineIndex-1]
// join what is left of the current line onto the end of the previous line
i.lines[i.cursorLineIndex-1] = prevLine + curLine
i.lines = append(i.lines[:i.cursorLineIndex], i.lines[i.cursorLineIndex+1:]...)
i.cursorLineIndex--
i.cursorLinePos = len(prevLine)
return
}
// I'm at the end of a line
if i.cursorLinePos == len(curLine)-1 {
i.lines[i.cursorLineIndex] = curLine[:len(curLine)-1]
i.cursorLinePos--
return
}
// I'm in the middle of a line
i.lines[i.cursorLineIndex] = curLine[:i.cursorLinePos-1] + curLine[i.cursorLinePos:]
i.cursorLinePos--
}
func (i *Input) addString(key string) {
if len(i.lines) > 0 {
if key == NEW_LINE {
// special case when we go back to the beginning of a buffer with multiple lines, prepend a new line
if i.cursorLineIndex == 0 && len(i.lines) > 1 {
i.lines = append([]string{""}, i.lines...)
// this case handles newlines at the end of the file or in the middle of the file
} else {
newString := ""
// if we are inserting a newline in a populated line then set what goes into the new line
// and what stays in the current line
if i.cursorLinePos < len(i.lines[i.cursorLineIndex]) {
newString = i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = i.lines[i.cursorLineIndex][:i.cursorLinePos]
}
// append a newline in the current position with the content we computed in the previous if statement
i.lines = append(
i.lines[:i.cursorLineIndex+1],
append(
[]string{newString},
i.lines[i.cursorLineIndex+1:]...,
)...,
)
}
// increment the line index, reset the cursor to the beginning and return to skip the next step
i.cursorLineIndex++
i.cursorLinePos = 0
return
}
// cursor is at the end of the line
if i.cursorLinePos == len(i.lines[i.cursorLineIndex]) {
//i.debugMessage ="end"
i.lines[i.cursorLineIndex] += key
// cursor at the beginning of the line
} else if i.cursorLinePos == 0 {
//i.debugMessage = "beginning"
i.lines[i.cursorLineIndex] = key + i.lines[i.cursorLineIndex]
// cursor in the middle of the line
} else {
//i.debugMessage = "middle"
before := i.lines[i.cursorLineIndex][:i.cursorLinePos]
after := i.lines[i.cursorLineIndex][i.cursorLinePos:]
i.lines[i.cursorLineIndex] = before + key + after
}
i.cursorLinePos += len(key)
} else {
//i.debugMessage = "newline"
i.lines = append(i.lines, key)
i.cursorLinePos += len(key)
}
}
func (i *Input) moveUp() {
// if we are already on the first line then just move the cursor to the beginning
if i.cursorLineIndex == 0 {
i.cursorLinePos = 0
return
}
// If the previous line is at least as long, we can move to the same position in the line
prevLine := i.lines[i.cursorLineIndex-1]
if len(prevLine) >= i.cursorLinePos {
i.cursorLineIndex--
} else {
// otherwise we move the cursor to the end of the previous line
i.cursorLineIndex--
i.cursorLinePos = len(prevLine) - 1
}
}
func (i *Input) moveDown() {
// we are already on the last line, we just need to move the position to the end of the line
if i.cursorLineIndex == len(i.lines)-1 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
return
}
// check if the cursor can move to the same position in the next line, otherwise move it to the end
nextLine := i.lines[i.cursorLineIndex+1]
if len(nextLine) >= i.cursorLinePos {
i.cursorLineIndex++
} else {
i.cursorLineIndex++
i.cursorLinePos = len(nextLine) - 1
}
}
func (i *Input) moveLeft() {
// if we are at the beginning of the line move the cursor to the previous line
if i.cursorLinePos == 0 {
origLine := i.cursorLineIndex
i.moveUp()
if origLine > 0 {
i.cursorLinePos = len(i.lines[i.cursorLineIndex])
}
return
}
i.cursorLinePos--
}
func (i *Input) moveRight() {
// if we are at the end of the line move to the next
if i.cursorLinePos >= len(i.lines[i.cursorLineIndex]) {
origLine := i.cursorLineIndex
i.moveDown()
if origLine < len(i.lines)-1 {
i.cursorLinePos = 0
}
return
}
i.cursorLinePos++
}
//
|
SetText
|
identifier_name
|
blk_device.rs
|
device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
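// Illustrative sketch (not part of the original file): the Property wrapper is meant to
// be consumed through the From/Into conversions above, typically when pulling typed
// values out of udev properties, exactly as the functions further down do:
//
//     let devmajor: u32 = Property(device.property_value("MAJOR")).into();
//     let model: Option<String> = Property(device.property_value("ID_MODEL")).into();
//     let size: u64 = Property(device.attribute_value("size")).into();
//
// Missing or unparsable values collapse to 0 / None rather than producing errors.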
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all || device.available {
list.push(device);
}
}
}
}
Ok(list)
}
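// Illustrative sketch (not part of the original file): the two helpers above are
// intended to be combined roughly like this when building the device list:
//
//     let mounts = get_mounts()?;               // snapshot of the current mount table
//     let devices = get_disks(false, &mounts)?; // only devices deemed "available"
//
// Passing `all = true` instead returns every enumerated block device, including ones
// that are mounted or carry a filesystem or partition type we will not use.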
// Iterate through udev to generate a list of all (block) devices
// associated with parent device <disk>
fn
|
get_partitions
|
identifier_name
|
|
blk_device.rs
|
satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
|
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all ||
|
{
return None;
}
|
conditional_block
|
blk_device.rs
|
satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
});
}
None
}
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error>
|
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all
|
{
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
|
identifier_body
|
blk_device.rs
|
satisfies the following
//! criteria:
//! - the device has a non-zero size
//! - the device is of an acceptable type as determined by well known device
//! numbers (eg. SCSI disks)
//! - the device represents either a disk with no partitions or a disk
//! partition of an acceptable type (Linux filesystem partitions only at
//! present)
//! - the device currently contains no filesystem or volume id (although this
//! logically implies that the device is not currently mounted, for the sake
//! of consistency, the mount table is also checked to ENSURE that the device
//! is not mounted)
use std::{
collections::HashMap,
ffi::{OsStr, OsString},
io::Error,
};
use proc_mounts::{MountInfo, MountIter};
use rpc::mayastor::{
block_device::{Filesystem, Partition},
BlockDevice,
};
use udev::{Device, Enumerator};
// Struct representing a property value in a udev::Device struct (and possibly
// elsewhere). It is used to provide conversions via various "From" trait
// implementations below.
struct Property<'a>(Option<&'a OsStr>);
impl From<Property<'_>> for String {
fn from(property: Property) -> Self {
String::from(property.0.map(|s| s.to_str()).flatten().unwrap_or(""))
}
}
impl From<Property<'_>> for Option<String> {
fn from(property: Property) -> Self {
property.0.map(|s| s.to_str()).flatten().map(String::from)
}
}
impl From<Property<'_>> for Option<u32> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u32 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
impl From<Property<'_>> for Option<u64> {
fn from(property: Property) -> Self {
Option::<String>::from(property)
.map(|s| s.parse().ok())
.flatten()
}
}
impl From<Property<'_>> for u64 {
fn from(property: Property) -> Self {
Option::<Self>::from(property).unwrap_or(0)
}
}
// Determine the type of devices which may be potentially presented
// as "available" for use.
fn usable_device(devmajor: &u32) -> bool {
const DEVICE_TYPES: [u32; 4] = [
7, // Loopback devices
8, // SCSI disk devices
43, // Network block devices
259, // Block Extended Major
];
if DEVICE_TYPES.iter().any(|m| m == devmajor) {
return true;
}
// TODO: add extra logic here as needed for devices with dynamically
// allocated major numbers
false
}
// Determine the type of partitions which may be potentially presented
// as "available" for use
fn usable_partition(partition: &Option<Partition>) -> bool {
const GPT_PARTITION_TYPES: [&str; 1] = [
"0fc63daf-8483-4772-8e79-3d69d8477de4", // Linux
];
const MBR_PARTITION_TYPES: [&str; 1] = [
"0x83", // Linux
];
if let Some(part) = partition {
if part.scheme == "gpt" {
return GPT_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
if part.scheme == "dos" {
return MBR_PARTITION_TYPES.iter().any(|&s| s == part.typeid);
}
return false;
}
true
}
// Determine if device is provided internally via mayastor.
// At present this simply involves examining the value of
// the udev "ID_MODEL" property.
fn mayastor_device(device: &Device) -> bool {
matches!(
device
.property_value("ID_MODEL")
.map(|s| s.to_str())
.flatten(),
Some("Mayastor NVMe controller") | Some("Nexus_CAS_Driver")
)
}
// Create a new Partition object from udev::Device properties
fn new_partition(parent: Option<&str>, device: &Device) -> Option<Partition> {
if let Some(devtype) = device.property_value("DEVTYPE") {
if devtype.to_str() == Some("partition") {
return Some(Partition {
parent: String::from(parent.unwrap_or("")),
number: Property(device.property_value("PARTN")).into(),
name: Property(device.property_value("PARTNAME")).into(),
scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME"))
.into(),
typeid: Property(device.property_value("ID_PART_ENTRY_TYPE"))
.into(),
uuid: Property(device.property_value("ID_PART_ENTRY_UUID"))
.into(),
});
}
}
None
}
// Create a new Filesystem object from udev::Device properties
// and the list of current filesystem mounts.
// Note that the result can be None if there is no filesystem
// associated with this Device.
fn new_filesystem(
device: &Device,
mountinfo: Option<&MountInfo>,
) -> Option<Filesystem> {
let mut fstype: Option<String> =
Property(device.property_value("ID_FS_TYPE")).into();
if fstype.is_none() {
fstype = mountinfo.map(|m| m.fstype.clone());
}
let label: Option<String> =
Property(device.property_value("ID_FS_LABEL")).into();
let uuid: Option<String> =
Property(device.property_value("ID_FS_UUID")).into();
// Do not return an actual object if none of the fields therein have actual
// values.
if fstype.is_none()
&& label.is_none()
&& uuid.is_none()
&& mountinfo.is_none()
{
return None;
}
Some(Filesystem {
fstype: fstype.unwrap_or_else(|| String::from("")),
label: label.unwrap_or_else(|| String::from("")),
uuid: uuid.unwrap_or_else(|| String::from("")),
mountpoint: mountinfo
.map(|m| String::from(m.dest.to_string_lossy()))
.unwrap_or_else(|| String::from("")),
})
}
// Create a new BlockDevice object from collected information.
// This function also contains the logic for determining whether
// or not the device that this represents is "available" for use.
fn new_device(
parent: Option<&str>,
include: bool,
device: &Device,
mounts: &HashMap<OsString, MountInfo>,
) -> Option<BlockDevice> {
if let Some(devname) = device.property_value("DEVNAME") {
let partition = new_partition(parent, device);
let filesystem = new_filesystem(device, mounts.get(devname));
let devmajor: u32 = Property(device.property_value("MAJOR")).into();
let size: u64 = Property(device.attribute_value("size")).into();
let available = include
&& size > 0
&& !mayastor_device(device)
&& usable_device(&devmajor)
&& (partition.is_none() || usable_partition(&partition))
&& filesystem.is_none();
return Some(BlockDevice {
devname: String::from(devname.to_str().unwrap_or("")),
devtype: Property(device.property_value("DEVTYPE")).into(),
devmajor,
devminor: Property(device.property_value("MINOR")).into(),
model: Property(device.property_value("ID_MODEL")).into(),
devpath: Property(device.property_value("DEVPATH")).into(),
devlinks: device
.property_value("DEVLINKS")
.map(|s| s.to_str())
.flatten()
.unwrap_or("")
.split(' ')
.filter(|&s| !s.is_empty())
.map(String::from)
.collect(),
size,
partition,
filesystem,
available,
|
// Get the list of current filesystem mounts.
fn get_mounts() -> Result<HashMap<OsString, MountInfo>, Error> {
let mut table: HashMap<OsString, MountInfo> = HashMap::new();
for mount in (MountIter::new()?).flatten() {
table.insert(OsString::from(mount.source.clone()), mount);
}
Ok(table)
}
// Iterate through udev to generate a list of all (block) devices
// with DEVTYPE == "disk"
fn get_disks(
all: bool,
mounts: &HashMap<OsString, MountInfo>,
) -> Result<Vec<BlockDevice>, Error> {
let mut list: Vec<BlockDevice> = Vec::new();
let mut enumerator = Enumerator::new()?;
enumerator.match_subsystem("block")?;
enumerator.match_property("DEVTYPE", "disk")?;
for entry in enumerator.scan_devices()? {
if let Some(devname) = entry.property_value("DEVNAME") {
let partitions = get_partitions(devname.to_str(), &entry, mounts)?;
if let Some(device) =
new_device(None, partitions.is_empty(), &entry, mounts)
{
if all || device.available {
list.push(device);
}
}
for device in partitions {
if all ||
|
});
}
None
}
|
random_line_split
|
tpm.rs
|
(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
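// Illustrative sketch (not part of the original file): the expected flow is to create
// the AK under an existing EK handle and later activate the credential blob received
// from the remote verifier/registrar (both `ek_handle` and `keyblob` are assumed to
// come from the surrounding agent code):
//
//     let (ak_handle, ak_name, ak_pub) = create_ak(&mut ctx, ek_handle)?;
//     // ... send ak_pub / ak_name out, receive `keyblob` back ...
//     let secret = activate_credential(&mut ctx, keyblob, ak_handle, ek_handle)?;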
const TSS_MAGIC: u32 = 3135029470;
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
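// A minimal sketch of the byte layout parse_cred_and_secret expects, shown by building
// a blob rather than parsing one: 4-byte big-endian magic, 4-byte big-endian version,
// 2-byte credential length, credential bytes, 2-byte secret length, secret bytes.
// The helper name and inputs are illustrative only; real credential and secret bytes
// must be valid TPM structures for the IDObject/EncryptedSecret conversions to succeed.
fn build_example_keyblob(cred: &[u8], secret: &[u8]) -> Vec<u8> {
    let mut blob = Vec::new();
    blob.extend_from_slice(&TSS_MAGIC.to_be_bytes());
    blob.extend_from_slice(&1u32.to_be_bytes()); // version
    blob.extend_from_slice(&(cred.len() as u16).to_be_bytes());
    blob.extend_from_slice(cred);
    blob.extend_from_slice(&(secret.len() as u16).to_be_bytes());
    blob.extend_from_slice(secret);
    blob
}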
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
pub(crate) fn activate_credential(
ctx: &mut Context,
keyblob: Vec<u8>,
ak: KeyHandle,
ek: KeyHandle,
) -> Result<Digest> {
let (credential, secret) = parse_cred_and_secret(keyblob)?;
let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
// We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
let _ = ctx.execute_with_nullauth_session(|context| {
context.policy_secret(
ek_auth.try_into()?,
AuthHandle::Endorsement,
Default::default(),
Default::default(),
Default::default(),
None,
)
})?;
let resp = ctx
.execute_with_sessions(
(Some(AuthSession::Password), Some(ek_auth), None),
|context| context.activate_credential(ak, ek, credential, secret),
)
.map_err(KeylimeError::from);
ctx.flush_context(ek.into())?;
resp
}
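// A minimal sketch of how the pieces above fit together, assuming a Context and an EK
// key handle have already been set up elsewhere and the keyblob was produced by the
// tenant's MakeCredential step. The helper name is illustrative only; note that the AK
// handle is left loaded, while activate_credential flushes the EK handle itself.
fn example_activation_flow(
    ctx: &mut Context,
    ek_handle: KeyHandle,
    keyblob: Vec<u8>,
) -> Result<Digest> {
    let (ak_handle, _ak_name, _ak_pub) = create_ak(ctx, ek_handle)?;
    activate_credential(ctx, keyblob, ak_handle, ek_handle)
}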
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
match alg.as_str() {
"sha256" => Ok(HashingAlgorithm::Sha256),
"sha1" => Ok(HashingAlgorithm::Sha1),
other => {
Err(KeylimeError::Other(format!("{:?} not implemented", other)))
}
}
}
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
AlgNull,
}
impl Default for TpmSigScheme {
fn default() -> Self {
TpmSigScheme::AlgNull
}
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
match scheme {
// The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
// in the details field.
TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
scheme: TPM2_ALG_NULL,
details: TPMU_SIG_SCHEME {
any: TPMS_SCHEME_HASH {
hashAlg: TPM2_ALG_NULL,
},
},
}),
_ => Err(KeylimeError::Other(format!(
"The signature scheme {:?} is not implemented",
scheme
))),
}
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> {
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
}
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000, which translates to 0x408000.
//
// The mask is a string because it is sent as a string from the tenant
// and verifier. The output from this function can be used to call a
// Quote from the TSS ESAPI.
//
pub(crate) fn read_mask(mask: &str) -> Result<Vec<PcrSlot>> {
let mut pcrs = Vec::new();
let num = u32::from_str_radix(mask.trim_start_matches("0x"), 16)?;
// check which bits are set
for i in 0..32 {
if num & (1 << i) != 0 {
pcrs.push(
match i {
0 => PcrSlot::Slot0,
1 => PcrSlot::Slot1,
2 => PcrSlot::Slot2,
3 => PcrSlot::Slot3,
4 => PcrSlot::Slot4,
5 => PcrSlot::Slot5,
6 => PcrSlot::Slot6,
7 => PcrSlot::Slot7,
8 => PcrSlot::Slot8,
9 => PcrSlot::Slot9,
10 => PcrSlot::Slot10,
11 => PcrSlot::Slot11,
12 => PcrSlot::Slot12,
13 => PcrSlot::Slot13,
14 => PcrSlot::Slot14,
15 => PcrSlot::Slot15,
16 => PcrSlot::Slot16,
17 => PcrSlot::Slot17,
18 => PcrSlot::Slot18,
19 => PcrSlot::Slot19,
|
20 => PcrSlot::Slot20,
21 => PcrSlot::Slot21,
22 => PcrSlot::Slot22,
|
random_line_split
|
|
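// A minimal sketch of the inverse of read_mask above: build the hex mask string from a
// list of PCR indices, with the least significant bit standing for PCR0. The helper
// name is illustrative only.
// e.g. pcrs_to_mask(&[15, 22]) == "0x408000", matching the keylime.conf example above.
fn pcrs_to_mask(pcrs: &[u32]) -> String {
    let mut mask = 0u32;
    for &pcr in pcrs {
        mask |= 1 << pcr;
    }
    format!("{:#x}", mask)
}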
tpm.rs
|
idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
selection_list: PcrSelectionList,
pcrdata: PcrData,
) -> Vec<u8> {
let pcrsel: TPML_PCR_SELECTION = selection_list.into();
let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
let digest: TPML_DIGEST = pcrdata.into();
let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
let mut data_vec =
Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
data_vec.extend(&pcrsel_vec);
data_vec.extend(&1u32.to_le_bytes());
data_vec.extend(&digest_vec);
data_vec
}
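// Sanity-check sketch for the transmutes in pcrdata_to_vec above: the fixed array
// lengths must match the size of the TSS structs, which is also what makes the total
// output 132 + 4 + 532 = 668 bytes. The test name is illustrative; the sizes are taken
// from the code above and should be verified against the TSS bindings in use.
#[test]
fn pcrdata_layout_sizes_match() {
    assert_eq!(std::mem::size_of::<TPML_PCR_SELECTION>(), 132);
    assert_eq!(std::mem::size_of::<TPML_DIGEST>(), 532);
}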
/* Converts a hex value in the form of a string (ex. from keylime.conf's
* ek_handle) to a key handle.
*
* Input: &str
* Return: Key handle
*
* Example call:
* let ek_handle = tpm::ek_from_hex_str("0x81000000");
*/
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
let val = val.trim_start_matches("0x");
Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
*
* Input: Connection context, parent key's KeyHandle.
* Return: (Key handle, key name, TPM public object as a vector)
* Example call:
* let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
const TSS_MAGIC: u32 = 3135029470; // 0xBADCC0DE
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
pub(crate) fn activate_credential(
ctx: &mut Context,
keyblob: Vec<u8>,
ak: KeyHandle,
ek: KeyHandle,
) -> Result<Digest> {
let (credential, secret) = parse_cred_and_secret(keyblob)?;
let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
// We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
let _ = ctx.execute_with_nullauth_session(|context| {
context.policy_secret(
ek_auth.try_into()?,
AuthHandle::Endorsement,
Default::default(),
Default::default(),
Default::default(),
None,
)
})?;
let resp = ctx
.execute_with_sessions(
(Some(AuthSession::Password), Some(ek_auth), None),
|context| context.activate_credential(ak, ek, credential, secret),
)
.map_err(KeylimeError::from);
ctx.flush_context(ek.into())?;
resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
match alg.as_str() {
"sha256" => Ok(HashingAlgorithm::Sha256),
"sha1" => Ok(HashingAlgorithm::Sha1),
other => {
Err(KeylimeError::Other(format!("{:?} not implemented", other)))
}
}
}
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
AlgNull,
}
impl Default for TpmSigScheme {
fn default() -> Self {
TpmSigScheme::AlgNull
}
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
match scheme {
// The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
// in the details field.
TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
scheme: TPM2_ALG_NULL,
details: TPMU_SIG_SCHEME {
any: TPMS_SCHEME_HASH {
hashAlg: TPM2_ALG_NULL,
},
},
}),
_ => Err(KeylimeError::Other(format!(
"The signature scheme {:?} is not implemented",
scheme
))),
}
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues>
|
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
}
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000
|
{
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new();
|
identifier_body
|
tpm.rs
|
idiosyncratic format tpm2-tools expects. The lengths
// of the vectors were determined by introspection into running tpm2-tools code. This is
// not ideal, and we should aim to move away from it if possible.
pub(crate) fn pcrdata_to_vec(
selection_list: PcrSelectionList,
pcrdata: PcrData,
) -> Vec<u8> {
let pcrsel: TPML_PCR_SELECTION = selection_list.into();
let pcrsel_vec: [u8; 132] = unsafe { std::mem::transmute(pcrsel) };
let digest: TPML_DIGEST = pcrdata.into();
let digest_vec: [u8; 532] = unsafe { std::mem::transmute(digest) };
let mut data_vec =
Vec::with_capacity(pcrsel_vec.len() + 4 + digest_vec.len());
data_vec.extend(&pcrsel_vec);
data_vec.extend(&1u32.to_le_bytes());
data_vec.extend(&digest_vec);
data_vec
}
/* Converts a hex value in the form of a string (ex. from keylime.conf's
* ek_handle) to a key handle.
*
* Input: &str
* Return: Key handle
*
* Example call:
* let ek_handle = tpm::ek_from_hex_str("0x81000000");
*/
pub(crate) fn ek_from_hex_str(val: &str) -> Result<KeyHandle> {
let val = val.trim_start_matches("0x");
Ok(KeyHandle::from(u32::from_str_radix(val, 16)?))
}
/* Creates AK and returns a tuple of its handle, name, and tpm2b_public as a vector.
*
* Input: Connection context, parent key's KeyHandle.
* Return: (Key handle, key name, TPM public object as a vector)
* Example call:
* let (key, name, tpm_pub) = tpm::create_ak(context, ek_handle)
*/
pub(crate) fn create_ak(
ctx: &mut Context,
handle: KeyHandle,
) -> Result<(KeyHandle, Name, Vec<u8>)> {
let ak = ak::create_ak(
ctx,
handle,
HashingAlgorithm::Sha256,
SignatureScheme::RsaSsa,
None,
DefaultKey,
)?;
let ak_tpm2b_pub = ak.out_public;
let tpm2_pub_vec = pub_to_vec(ak_tpm2b_pub);
let ak_handle =
ak::load_ak(ctx, handle, None, ak.out_private, ak.out_public)?;
let (_, name, _) = ctx.read_public(ak_handle)?;
Ok((ak_handle, name, tpm2_pub_vec))
}
const TSS_MAGIC: u32 = 3135029470; // 0xBADCC0DE
fn parse_cred_and_secret(
keyblob: Vec<u8>,
) -> Result<(IDObject, EncryptedSecret)> {
let magic = u32::from_be_bytes(keyblob[0..4].try_into().unwrap()); //#[allow_ci]
let version = u32::from_be_bytes(keyblob[4..8].try_into().unwrap()); //#[allow_ci]
if magic != TSS_MAGIC {
return Err(KeylimeError::Other(format!("Error parsing cred and secret; TSS_MAGIC number {} does not match expected value {}", magic, TSS_MAGIC)));
}
if version != 1 {
return Err(KeylimeError::Other(format!(
"Error parsing cred and secret; version {} is not 1",
version
)));
}
let credsize = u16::from_be_bytes(keyblob[8..10].try_into().unwrap()); //#[allow_ci]
let secretsize = u16::from_be_bytes(
keyblob[(10 + credsize as usize)..(12 + credsize as usize)]
.try_into()
.unwrap(), //#[allow_ci]
);
let credential = &keyblob[10..(10 + credsize as usize)];
let secret = &keyblob[(12 + credsize as usize)..];
let credential = IDObject::try_from(credential)?;
let secret = EncryptedSecret::try_from(secret)?;
Ok((credential, secret))
}
fn create_empty_session(
ctx: &mut Context,
ses_type: SessionType,
) -> Result<AuthSession> {
let session = ctx.start_auth_session(
None,
None,
None,
ses_type,
Cipher::aes_128_cfb().try_into()?,
HashingAlgorithm::Sha256,
)?;
let (ses_attrs, ses_attrs_mask) = SessionAttributesBuilder::new()
.with_encrypt(true)
.with_decrypt(true)
.build();
ctx.tr_sess_set_attributes(session.unwrap(), ses_attrs, ses_attrs_mask)?; //#[allow_ci]
Ok(session.unwrap()) //#[allow_ci]
}
pub(crate) fn activate_credential(
ctx: &mut Context,
keyblob: Vec<u8>,
ak: KeyHandle,
ek: KeyHandle,
) -> Result<Digest> {
let (credential, secret) = parse_cred_and_secret(keyblob)?;
let ek_auth = create_empty_session(ctx, SessionType::Policy)?;
// We authorize ses2 with PolicySecret(ENDORSEMENT) as per PolicyA
let _ = ctx.execute_with_nullauth_session(|context| {
context.policy_secret(
ek_auth.try_into()?,
AuthHandle::Endorsement,
Default::default(),
Default::default(),
Default::default(),
None,
)
})?;
let resp = ctx
.execute_with_sessions(
(Some(AuthSession::Password), Some(ek_auth), None),
|context| context.activate_credential(ak, ek, credential, secret),
)
.map_err(KeylimeError::from);
ctx.flush_context(ek.into())?;
resp
}
// Returns TSS struct corresponding to an algorithm specified as a string, ex.
// the string from the keylime.conf file.
pub(crate) fn get_hash_alg(alg: String) -> Result<HashingAlgorithm> {
match alg.as_str() {
"sha256" => Ok(HashingAlgorithm::Sha256),
"sha1" => Ok(HashingAlgorithm::Sha1),
other => {
Err(KeylimeError::Other(format!("{:?} not implemented", other)))
}
}
}
#[derive(Debug)]
pub(crate) enum TpmSigScheme {
AlgNull,
}
impl Default for TpmSigScheme {
fn
|
() -> Self {
TpmSigScheme::AlgNull
}
}
// Returns TSS struct corresponding to a signature scheme.
pub(crate) fn get_sig_scheme(
scheme: TpmSigScheme,
) -> Result<TPMT_SIG_SCHEME> {
match scheme {
// The TPM2_ALG_NULL sig scheme can be filled out with placeholder data
// in the details field.
TpmSigScheme::AlgNull => Ok(TPMT_SIG_SCHEME {
scheme: TPM2_ALG_NULL,
details: TPMU_SIG_SCHEME {
any: TPMS_SCHEME_HASH {
hashAlg: TPM2_ALG_NULL,
},
},
}),
_ => Err(KeylimeError::Other(format!(
"The signature scheme {:?} is not implemented",
scheme
))),
}
}
// Takes a public PKey and returns a DigestValue of it.
// Note: Currently, this creates a DigestValue including both SHA256 and
// SHA1 because these banks are checked by Keylime on the Python side.
pub(crate) fn pubkey_to_tpm_digest(
pubkey: &PKeyRef<Public>,
) -> Result<DigestValues> {
let mut keydigest = DigestValues::new();
let keybytes = match pubkey.id() {
Id::RSA => pubkey.rsa()?.public_key_to_pem()?,
other_id => {
return Err(KeylimeError::Other(format!(
"Converting to digest value for key type {:?} is not yet implemented",
other_id
)));
}
};
// SHA256
let mut hasher = openssl::sha::Sha256::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha256, Digest::try_from(hashvec)?);
// SHA1
let mut hasher = openssl::sha::Sha1::new();
hasher.update(&keybytes);
let mut hashvec: Vec<u8> = hasher.finish().into();
keydigest.set(HashingAlgorithm::Sha1, Digest::try_from(hashvec)?);
Ok(keydigest)
}
// Reads a mask in the form of some hex value, ex. "0x408000",
// translating bits that are set to pcrs to include in the list.
//
// The masks are sent from the tenant and cloud verifier to indicate
// the PCRs to include in a Quote. The LSB in the mask corresponds to
// PCR0. For example, keylime.conf specifies PCRs 15 and 22 under
// [tenant][tpm_policy]. As a bit mask, this would be represented as
// 0b010000001000000000000000
|
default
|
identifier_name
|
size_cache_fs.go
|
os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.cache.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
} else if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
} else {
return cacheMiss, nil, err
}
}
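// Illustrative sketch of the staleness rule in cacheStatus above, using plain time
// values instead of file infos (the helper name is made up; it assumes the standard
// time package). A cached copy is trusted outright when cacheTime is zero, re-checked
// against the base only once it is older than cacheTime, and treated as stale only
// when the base copy is newer.
func isStaleSketch(cacheMod, baseMod time.Time, cacheTime time.Duration, now time.Time) bool {
	if cacheTime == 0 {
		return false
	}
	if cacheMod.Add(cacheTime).Before(now) {
		return baseMod.After(cacheMod)
	}
	return false
}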
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
// If layer file exists, we need to remove it
// and replace it with current file
// TODO
// Get size, if size over our limit, evict one file
bfh, err := u.base.Open(name)
if err != nil {
if os.IsNotExist(err) {
return nil, err
} else {
return nil, fmt.Errorf("error opening base file: %v", err)
}
}
// First make sure the directory exists
exists, err := Exists(u.cache, filepath.Dir(name))
if err != nil {
return nil, err
}
if !exists {
err = u.cache.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return nil, err
}
}
// Create the file on the overlay
lfh, err := u.cache.Create(name)
if err != nil {
return nil, err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, fmt.Errorf("error copying layer to base: %v", err)
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, syscall.EIO
}
isDir := bfi.IsDir()
err = lfh.Close()
if err != nil {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, err
}
if err := bfh.Close(); err != nil {
return nil, fmt.Errorf("error closing base file: %v", err)
}
if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
return nil, err
}
// if cache is stale and file already inside sorted set, we are just going to update it
// Create info
if !isDir {
info := &cacheFile{
Path: name,
Size: bfi.Size(),
LastAccessTime: time.Now().UnixNano() / 1000,
}
return info, nil
} else {
return nil, nil
}
}
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chtimes(name, atime, mtime)
}
return u.base.Chtimes(name, atime, mtime)
}
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chmod(name, mode)
}
return u.base.Chmod(name, mode)
}
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
return u.base.Stat(name)
}
func (u *SizeCacheFS) Rename(oldname, newname string) error {
exists, err := Exists(u.cache, oldname)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
info := u.getCacheFile(oldname)
u.removeFromCache(oldname)
info.Path = newname
if err := u.addToCache(info); err != nil {
return err
}
if err := u.cache.Rename(oldname, newname); err != nil {
return err
}
}
return u.base.Rename(oldname, newname)
}
func (u *SizeCacheFS) Remove(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return fmt.Errorf("error determining if file exists: %v", err)
}
// If cache file exists, update to ensure consistency
if exists {
if err := u.cache.Remove(name); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.removeFromCache(name)
}
return u.base.Remove(name)
}
func (u *SizeCacheFS) RemoveAll(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return u.Remove(path)
} else {
return nil
}
})
if err != nil {
return err
}
// Remove the dirs
_ = u.cache.RemoveAll(name)
}
return u.base.RemoveAll(name)
}
func (u *SizeCacheFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
exists, err := Exists(u.base, name)
if err != nil {
return nil, fmt.Errorf("error determining if base file exists: %v", err)
}
if exists {
var err error
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
// It is not a dir; we cannot open a non-existent dir
info = &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
}
}
var cacheFlag = flag
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
// Force read write mode
cacheFlag = (flag & (^os.O_WRONLY)) | os.O_RDWR
}
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.cache.OpenFile(name, cacheFlag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
uf := NewSizeCacheFile(bfi, lfi, flag, u, info)
return uf, nil
}
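// Illustrative sketch of the cacheFlag rewrite in OpenFile above: any write-ish open of
// the base file is mirrored to the cache copy in read-write mode, so the cache file can
// be both updated and read back. The helper name is made up; it assumes the os and
// syscall packages already imported by this file.
func cacheOpenFlagSketch(flag int) int {
	if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
		return (flag &^ os.O_WRONLY) | os.O_RDWR
	}
	return flag
}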
func (u *SizeCacheFS) Open(name string) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if !bfi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
case cacheStale:
if !fi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
}
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
lfile, err := u.cache.Open(name)
if err != nil && bfile == nil {
return nil, err
}
fi, err = u.cache.Stat(name)
if err != nil
|
{
return nil, err
}
|
conditional_block
|
|
size_cache_fs.go
|
error removing cache file: %v", err)
}
u.currSize -= file.Size
path := filepath.Dir(file.Path)
for path != "" && path != "." && path != "/" {
f, err := u.cache.Open(path)
if err != nil {
_ = f.Close()
return fmt.Errorf("error opening parent directory: %v", err)
}
dirs, err := f.Readdir(-1)
if err != nil {
_ = f.Close()
return fmt.Errorf("error reading parent directory: %v", err)
}
_ = f.Close()
if len(dirs) == 0 {
if err := u.cache.Remove(path); err != nil {
return fmt.Errorf("error removing parent directory: %v", err)
}
path = filepath.Dir(path)
} else {
break
}
}
}
u.files.AddOrUpdate(info.Path, sortedset.SCORE(info.LastAccessTime), info)
u.currSize += info.Size
return nil
}
func (u *SizeCacheFS) removeFromCache(name string) {
u.cacheL.Lock()
defer u.cacheL.Unlock()
node := u.files.GetByKey(name)
if node != nil {
// If we remove a file that is currently open, the file will re-add itself to
// the cache on close. This is expected behavior: a removed open file
// re-appears on close.
u.files.Remove(name)
info := node.Value.(*cacheFile)
u.currSize -= info.Size
}
}
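// Simplified, dependency-free sketch of the size-bounded eviction that addToCache
// drives through the sorted set above: drop the least recently accessed entries until
// the incoming file fits under the size limit. Type, field, and function names here are
// illustrative only (the real code keeps entries in a sorted set keyed by last access
// time); this sketch uses the standard sort package.
type evictEntrySketch struct {
	path           string
	size           int64
	lastAccessTime int64
}

func evictUntilFits(entries []evictEntrySketch, currSize, newSize, maxSize int64) (evicted []string, remaining int64) {
	sort.Slice(entries, func(i, j int) bool {
		return entries[i].lastAccessTime < entries[j].lastAccessTime
	})
	remaining = currSize
	for _, e := range entries {
		if remaining+newSize <= maxSize {
			break
		}
		evicted = append(evicted, e.path)
		remaining -= e.size
	}
	return evicted, remaining
}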
/*
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
*/
func (u *SizeCacheFS) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.cache.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
} else if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
} else {
return cacheMiss, nil, err
}
}
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
// If layer file exists, we need to remove it
// and replace it with current file
// TODO
// Get size, if size over our limit, evict one file
bfh, err := u.base.Open(name)
if err != nil {
if os.IsNotExist(err) {
return nil, err
} else {
return nil, fmt.Errorf("error opening base file: %v", err)
}
}
// First make sure the directory exists
exists, err := Exists(u.cache, filepath.Dir(name))
if err != nil {
return nil, err
}
if !exists {
err = u.cache.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return nil, err
}
}
// Create the file on the overlay
lfh, err := u.cache.Create(name)
if err != nil {
return nil, err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, fmt.Errorf("error copying layer to base: %v", err)
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, syscall.EIO
}
isDir := bfi.IsDir()
err = lfh.Close()
if err != nil {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, err
}
if err := bfh.Close(); err != nil {
return nil, fmt.Errorf("error closing base file: %v", err)
}
if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
return nil, err
}
// if cache is stale and file already inside sorted set, we are just going to update it
// Create info
if !isDir {
info := &cacheFile{
Path: name,
Size: bfi.Size(),
LastAccessTime: time.Now().UnixNano() / 1000,
}
return info, nil
} else {
return nil, nil
}
}
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chtimes(name, atime, mtime)
}
return u.base.Chtimes(name, atime, mtime)
}
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chmod(name, mode)
}
return u.base.Chmod(name, mode)
}
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
return u.base.Stat(name)
}
func (u *SizeCacheFS) Rename(oldname, newname string) error {
exists, err := Exists(u.cache, oldname)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
info := u.getCacheFile(oldname)
u.removeFromCache(oldname)
info.Path = newname
if err := u.addToCache(info); err != nil {
return err
}
if err := u.cache.Rename(oldname, newname); err != nil {
return err
}
}
return u.base.Rename(oldname, newname)
}
func (u *SizeCacheFS) Remove(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return fmt.Errorf("error determining if file exists: %v", err)
}
// If cache file exists, update to ensure consistency
if exists {
if err := u.cache.Remove(name); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.removeFromCache(name)
}
return u.base.Remove(name)
}
func (u *SizeCacheFS) RemoveAll(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return u.Remove(path)
} else {
return nil
}
})
if err != nil {
return err
}
// Remove the dirs
_ = u.cache.RemoveAll(name)
}
return u.base.RemoveAll(name)
}
func (u *SizeCacheFS) OpenFile(name string, flag int, perm os.FileMode) (File, error)
|
{
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
exists, err := Exists(u.base, name)
if err != nil {
return nil, fmt.Errorf("error determining if base file exists: %v", err)
}
|
identifier_body
|
|
size_cache_fs.go
|
.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
*/
func (u *SizeCacheFS) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.cache.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
// TODO checking even if shouldnt ?
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
} else if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
} else {
return cacheMiss, nil, err
}
}
func (u *SizeCacheFS) copyToCache(name string) (*cacheFile, error) {
// If layer file exists, we need to remove it
// and replace it with current file
// TODO
// Get size, if size over our limit, evict one file
bfh, err := u.base.Open(name)
if err != nil {
if os.IsNotExist(err) {
return nil, err
} else {
return nil, fmt.Errorf("error opening base file: %v", err)
}
}
// First make sure the directory exists
exists, err := Exists(u.cache, filepath.Dir(name))
if err != nil {
return nil, err
}
if !exists {
err = u.cache.MkdirAll(filepath.Dir(name), 0777) // FIXME?
if err != nil {
return nil, err
}
}
// Create the file on the overlay
lfh, err := u.cache.Create(name)
if err != nil {
return nil, err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, fmt.Errorf("error copying layer to base: %v", err)
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, syscall.EIO
}
isDir := bfi.IsDir()
err = lfh.Close()
if err != nil {
_ = u.cache.Remove(name)
_ = lfh.Close()
return nil, err
}
if err := bfh.Close(); err != nil {
return nil, fmt.Errorf("error closing base file: %v", err)
}
if err := u.cache.Chtimes(name, bfi.ModTime(), bfi.ModTime()); err != nil {
return nil, err
}
// if cache is stale and file already inside sorted set, we are just going to update it
// Create info
if !isDir {
info := &cacheFile{
Path: name,
Size: bfi.Size(),
LastAccessTime: time.Now().UnixNano() / 1000,
}
return info, nil
} else {
return nil, nil
}
}
func (u *SizeCacheFS) Chtimes(name string, atime, mtime time.Time) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chtimes(name, atime, mtime)
}
return u.base.Chtimes(name, atime, mtime)
}
func (u *SizeCacheFS) Chmod(name string, mode os.FileMode) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
_ = u.cache.Chmod(name, mode)
}
return u.base.Chmod(name, mode)
}
func (u *SizeCacheFS) Stat(name string) (os.FileInfo, error) {
return u.base.Stat(name)
}
func (u *SizeCacheFS) Rename(oldname, newname string) error {
exists, err := Exists(u.cache, oldname)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
info := u.getCacheFile(oldname)
u.removeFromCache(oldname)
info.Path = newname
if err := u.addToCache(info); err != nil {
return err
}
if err := u.cache.Rename(oldname, newname); err != nil {
return err
}
}
return u.base.Rename(oldname, newname)
}
func (u *SizeCacheFS) Remove(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return fmt.Errorf("error determining if file exists: %v", err)
}
// If cache file exists, update to ensure consistency
if exists {
if err := u.cache.Remove(name); err != nil {
return fmt.Errorf("error removing cache file: %v", err)
}
u.removeFromCache(name)
}
return u.base.Remove(name)
}
func (u *SizeCacheFS) RemoveAll(name string) error {
exists, err := Exists(u.cache, name)
if err != nil {
return err
}
// If cache file exists, update to ensure consistency
if exists {
err := Walk(u.cache, name, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
return u.Remove(path)
} else {
return nil
}
})
if err != nil {
return err
}
// Remove the dirs
_ = u.cache.RemoveAll(name)
}
return u.base.RemoveAll(name)
}
func (u *SizeCacheFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
exists, err := Exists(u.base, name)
if err != nil {
return nil, fmt.Errorf("error determining if base file exists: %v", err)
}
if exists {
var err error
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
// It is not a dir; we cannot open a non-existent dir
info = &cacheFile{
Path: name,
Size: 0,
LastAccessTime: time.Now().UnixNano() / 1000,
}
}
}
var cacheFlag = flag
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
// Force read write mode
cacheFlag = (flag & (^os.O_WRONLY)) | os.O_RDWR
}
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.cache.OpenFile(name, cacheFlag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
uf := NewSizeCacheFile(bfi, lfi, flag, u, info)
return uf, nil
}
func (u *SizeCacheFS) Open(name string) (File, error) {
// Very important, remove from cache to prevent eviction while opening
info := u.getCacheFile(name)
if info != nil {
u.removeFromCache(name)
}
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if !bfi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
case cacheStale:
if !fi.IsDir() {
info, err = u.copyToCache(name)
if err != nil {
return nil, err
}
} else {
return u.base.Open(name)
}
}
|
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
|
random_line_split
|